From fedd0c15d2885e393d4ef4db818b462c3bbfc337 Mon Sep 17 00:00:00 2001
From: Salil Mehta
Date: Thu, 14 Dec 2017 18:03:02 +0000
Subject: net: hns3: Add HNS3 VF IMP(Integrated Management Proc) cmd interface

This patch adds support for the command interface used for communication
with the IMP (Integrated Management Processor) to the HNS3 Virtual
Function driver.

Each VF supports a CQP (Command Queue Pair) ring interface. Each CQP
consists of a send queue (CSQ) and a receive queue (CRQ). A VF may
support various commands, such as querying the firmware version, TQP
management, statistics, interrupt-related operations, mailbox messages
etc.

This patch also contains code to initialize the command queue, manage the
command queue descriptors, and implement the Rx/Tx protocol with the
command processor in the form of various commands/results and
acknowledgements.

Signed-off-by: Salil Mehta
Signed-off-by: lipeng
Signed-off-by: David S. Miller
---
 .../ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 342 +++++++++++++++++++++
 .../ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 256 +++++++++++++++
 2 files changed, 598 insertions(+)
 create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
 create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
new file mode 100644
index 000000000000..85985e731311
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
+#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? 
\ + DMA_TO_DEVICE : DMA_FROM_DEVICE) +#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) + +static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + int used; + + used = (ntu - ntc + ring->desc_num) % ring->desc_num; + + return ring->desc_num - used - 1; +} + +static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) +{ + struct hclgevf_cmq_ring *csq = &hw->cmq.csq; + u16 ntc = csq->next_to_clean; + struct hclgevf_desc *desc; + int clean = 0; + u32 head; + + desc = &csq->desc[ntc]; + head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); + while (head != ntc) { + memset(desc, 0, sizeof(*desc)); + ntc++; + if (ntc == csq->desc_num) + ntc = 0; + desc = &csq->desc[ntc]; + clean++; + } + csq->next_to_clean = ntc; + + return clean; +} + +static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw) +{ + u32 head; + + head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); + + return head == hw->cmq.csq.next_to_use; +} + +static bool hclgevf_is_special_opcode(u16 opcode) +{ + u16 spec_opcode[] = {0x30, 0x31, 0x32}; + int i; + + for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { + if (spec_opcode[i] == opcode) + return true; + } + + return false; +} + +static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) +{ + int size = ring->desc_num * sizeof(struct hclgevf_desc); + + ring->desc = kzalloc(size, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, + size, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; + return -ENOMEM; + } + + return 0; +} + +static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) +{ + dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, + ring->desc_num * sizeof(ring->desc[0]), + hclgevf_ring_to_dma_dir(ring)); + + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; +} + +static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, + struct hclgevf_cmq_ring *ring) +{ + struct hclgevf_hw *hw = &hdev->hw; + int ring_type = ring->flag; + u32 reg_val; + int ret; + + ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; + spin_lock_init(&ring->lock); + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->dev = hdev; + + /* allocate CSQ/CRQ descriptor */ + ret = hclgevf_alloc_cmd_desc(ring); + if (ret) { + dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret, + (ring_type == HCLGEVF_TYPE_CSQ) ? 
"CSQ" : "CRQ"); + return ret; + } + + /* initialize the hardware registers with csq/crq dma-address, + * descriptor number, head & tail pointers + */ + switch (ring_type) { + case HCLGEVF_TYPE_CSQ: + reg_val = (u32)ring->desc_dma_addr; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); + reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); + + reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + reg_val |= HCLGEVF_NIC_CMQ_ENABLE; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); + + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); + break; + case HCLGEVF_TYPE_CRQ: + reg_val = (u32)ring->desc_dma_addr; + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); + reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); + + reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + reg_val |= HCLGEVF_NIC_CMQ_ENABLE; + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); + + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); + break; + } + + return 0; +} + +void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, + enum hclgevf_opcode_type opcode, bool is_read) +{ + memset(desc, 0, sizeof(struct hclgevf_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR | + HCLGEVF_CMD_FLAG_IN); + if (is_read) + desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR); + else + desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR); +} + +/* hclgevf_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + */ +int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num) +{ + struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev; + struct hclgevf_desc *desc_to_use; + bool complete = false; + u32 timeout = 0; + int handle = 0; + int status = 0; + u16 retval; + u16 opcode; + int ntc; + + spin_lock_bh(&hw->cmq.csq.lock); + + if (num > hclgevf_ring_space(&hw->cmq.csq)) { + spin_unlock_bh(&hw->cmq.csq.lock); + return -EBUSY; + } + + /* Record the location of desc in the ring for this time + * which will be use for hardware to write back + */ + ntc = hw->cmq.csq.next_to_use; + opcode = le16_to_cpu(desc[0].opcode); + while (handle < num) { + desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; + *desc_to_use = desc[handle]; + (hw->cmq.csq.next_to_use)++; + if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) + hw->cmq.csq.next_to_use = 0; + handle++; + } + + /* Write to hardware */ + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, + hw->cmq.csq.next_to_use); + + /* If the command is sync, wait for the firmware to write back, + * if multi descriptors to be sent, use the first one to check + */ + if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) { + do { + if (hclgevf_cmd_csq_done(hw)) + break; + udelay(1); + timeout++; + } while (timeout < hw->cmq.tx_timeout); + } + + if (hclgevf_cmd_csq_done(hw)) { + complete = true; + handle = 0; + + while (handle < num) { + /* Get the result of hardware write back */ + desc_to_use = &hw->cmq.csq.desc[ntc]; + desc[handle] = *desc_to_use; + + if (likely(!hclgevf_is_special_opcode(opcode))) + retval = 
+static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
+					      u32 *version)
+{
+	struct hclgevf_query_version_cmd *resp;
+	struct hclgevf_desc desc;
+	int status;
+
+	resp = (struct hclgevf_query_version_cmd *)desc.data;
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);
+	status = hclgevf_cmd_send(hw, &desc, 1);
+	if (!status)
+		*version = le32_to_cpu(resp->firmware);
+
+	return status;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+	u32 version;
+	int ret;
+
+	/* setup Tx write back timeout */
+	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+
+	/* setup queue CSQ/CRQ rings */
+	hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
+	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize CSQ ring\n", ret);
+		return ret;
+	}
+
+	hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
+	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize CRQ ring\n", ret);
+		goto err_csq;
+	}
+
+	/* get firmware version */
+	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to query firmware version\n", ret);
+		goto err_crq;
+	}
+	hdev->fw_version = version;
+
+	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+	return 0;
+err_crq:
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+err_csq:
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+
+	return ret;
+}
+
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
+{
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+}
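[Editor's note on the base-address programming in hclgevf_init_cmd_queue() above: the high word is derived with (addr >> 31) >> 1 instead of addr >> 32, because a single 32-bit shift would be undefined behaviour on configurations where dma_addr_t is only 32 bits wide. A standalone demonstration, illustrative only:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x000000abcd001000ULL;

	/* low 32 bits, as written to the ..._BASEADDR_L_REG register */
	uint32_t lo = (uint32_t)dma;
	/* high 32 bits; (x >> 31) >> 1 stays well-defined even when the
	 * address type is only 32 bits wide, where "x >> 32" would be UB
	 */
	uint32_t hi = (uint32_t)((dma >> 31) >> 1);

	printf("lo=0x%08x hi=0x%08x\n", lo, hi); /* lo=0xcd001000 hi=0x000000ab */
	return 0;
}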
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
new file mode 100644
index 000000000000..ad8adfecbb22
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_CMD_H
+#define __HCLGEVF_CMD_H
+#include <linux/io.h>
+#include <linux/types.h>
+#include "hnae3.h"
+
+#define HCLGEVF_CMDQ_TX_TIMEOUT		200
+#define HCLGEVF_CMDQ_RX_INVLD_B		0
+#define HCLGEVF_CMDQ_RX_OUTVLD_B	1
+
+struct hclgevf_hw;
+struct hclgevf_dev;
+
+struct hclgevf_desc {
+	__le16 opcode;
+	__le16 flag;
+	__le16 retval;
+	__le16 rsv;
+	__le32 data[6];
+};
+
+struct hclgevf_desc_cb {
+	dma_addr_t dma;
+	void *va;
+	u32 length;
+};
+
+struct hclgevf_cmq_ring {
+	dma_addr_t desc_dma_addr;
+	struct hclgevf_desc *desc;
+	struct hclgevf_desc_cb *desc_cb;
+	struct hclgevf_dev *dev;
+	u32 head;
+	u32 tail;
+
+	u16 buf_size;
+	u16 desc_num;
+	int next_to_use;
+	int next_to_clean;
+	u8 flag;
+	spinlock_t lock; /* Command queue lock */
+};
+
+enum hclgevf_cmd_return_status {
+	HCLGEVF_CMD_EXEC_SUCCESS	= 0,
+	HCLGEVF_CMD_NO_AUTH		= 1,
+	HCLGEVF_CMD_NOT_EXEC		= 2,
+	HCLGEVF_CMD_QUEUE_FULL		= 3,
+};
+
+enum hclgevf_cmd_status {
+	HCLGEVF_STATUS_SUCCESS		= 0,
+	HCLGEVF_ERR_CSQ_FULL		= -1,
+	HCLGEVF_ERR_CSQ_TIMEOUT		= -2,
+	HCLGEVF_ERR_CSQ_ERROR		= -3
+};
+
+struct hclgevf_cmq {
+	struct hclgevf_cmq_ring csq;
+	struct hclgevf_cmq_ring crq;
+	u16 tx_timeout; /* Tx timeout */
+	enum hclgevf_cmd_status last_status;
+};
+
+#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT		0
+#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT	1
+#define HCLGEVF_CMD_FLAG_NEXT_SHIFT		2
+#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT		3
+#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT		4
+#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT		5
+
+#define HCLGEVF_CMD_FLAG_IN		BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_OUT		BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_NEXT		BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
+#define HCLGEVF_CMD_FLAG_WR		BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HCLGEVF_CMD_FLAG_NO_INTR	BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
+#define HCLGEVF_CMD_FLAG_ERR_INTR	BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
+
+enum hclgevf_opcode_type {
+	/* Generic command */
+	HCLGEVF_OPC_QUERY_FW_VER	= 0x0001,
+	/* TQP command */
+	HCLGEVF_OPC_QUERY_TX_STATUS	= 0x0B03,
+	HCLGEVF_OPC_QUERY_RX_STATUS	= 0x0B13,
+	HCLGEVF_OPC_CFG_COM_TQP_QUEUE	= 0x0B20,
+	/* TSO cmd */
+	HCLGEVF_OPC_TSO_GENERIC_CONFIG	= 0x0C01,
+	/* RSS cmd */
+	HCLGEVF_OPC_RSS_GENERIC_CONFIG	= 0x0D01,
+	HCLGEVF_OPC_RSS_INDIR_TABLE	= 0x0D07,
+	HCLGEVF_OPC_RSS_TC_MODE		= 0x0D08,
+	/* Mailbox cmd */
+	HCLGEVF_OPC_MBX_VF_TO_PF	= 0x2001,
+};
+
+#define HCLGEVF_TQP_REG_OFFSET		0x80000
+#define HCLGEVF_TQP_REG_SIZE		0x200
+
+struct hclgevf_tqp_map {
+	__le16 tqp_id;	/* Absolute tqp id in this pf */
+	u8 tqp_vf;	/* VF id */
+#define HCLGEVF_TQP_MAP_TYPE_PF		0
+#define HCLGEVF_TQP_MAP_TYPE_VF		1
+#define HCLGEVF_TQP_MAP_TYPE_B		0
+#define HCLGEVF_TQP_MAP_EN_B		1
+	u8 tqp_flag;	/* Indicate it's pf or vf tqp */
+	__le16 tqp_vid; /* Virtual id in this pf/vf */
+	u8 rsv[18];
+};
+
+#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD	10
+
+enum hclgevf_int_type {
+	HCLGEVF_INT_TX = 0,
+	HCLGEVF_INT_RX,
+	HCLGEVF_INT_EVENT,
+};
+
+struct hclgevf_ctrl_vector_chain {
+	u8 int_vector_id;
+	u8 int_cause_num;
+#define HCLGEVF_INT_TYPE_S	0
+#define HCLGEVF_INT_TYPE_M	0x3
+#define HCLGEVF_TQP_ID_S	2
+#define HCLGEVF_TQP_ID_M	(0x3fff << HCLGEVF_TQP_ID_S)
+	__le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
+	u8 vfid;
+	u8 resv;
+};
+
+struct hclgevf_query_version_cmd {
+	__le32 firmware;
+	__le32 firmware_rsv[5];
+};
+
+#define HCLGEVF_RSS_HASH_KEY_OFFSET	4
+#define HCLGEVF_RSS_HASH_KEY_NUM	16
+struct hclgevf_rss_config_cmd {
+	u8 hash_config;
+	u8 rsv[7];
+	u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
+};
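[Editor's note: the 32-byte descriptor layout above, an 8-byte opcode/flag/retval/rsv header followed by six 32-bit data words, is shared by every command. A standalone sketch that mirrors the layout and the flag composition done by hclgevf_cmd_setup_basic_desc(); demo_desc and the FLAG_* names are local stand-ins, not the driver's:]

#include <assert.h> /* static_assert (C11) */
#include <stdint.h>
#include <stdio.h>

/* Mirrors struct hclgevf_desc: 8-byte header + 6 x 32-bit payload words. */
struct demo_desc {
	uint16_t opcode;
	uint16_t flag;
	uint16_t retval;
	uint16_t rsv;
	uint32_t data[6];
};

static_assert(sizeof(struct demo_desc) == 32, "descriptor must be 32 bytes");

/* Flag bits, mirroring the HCLGEVF_CMD_FLAG_* defines */
#define FLAG_IN      (1u << 0)
#define FLAG_NEXT    (1u << 2)
#define FLAG_WR      (1u << 3)
#define FLAG_NO_INTR (1u << 4)

int main(void)
{
	/* a polled read request, as composed by the setup helper above */
	uint16_t flag = FLAG_IN | FLAG_NO_INTR | FLAG_WR;

	printf("flag=0x%04x (sync poll: NO_INTR set)\n", flag);
	return 0;
}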
+struct hclgevf_rss_input_tuple_cmd {
+	u8 ipv4_tcp_en;
+	u8 ipv4_udp_en;
+	u8 ipv4_stcp_en;
+	u8 ipv4_fragment_en;
+	u8 ipv6_tcp_en;
+	u8 ipv6_udp_en;
+	u8 ipv6_stcp_en;
+	u8 ipv6_fragment_en;
+	u8 rsv[16];
+};
+
+#define HCLGEVF_RSS_CFG_TBL_SIZE	16
+
+struct hclgevf_rss_indirection_table_cmd {
+	u16 start_table_index;
+	u16 rss_set_bitmap;
+	u8 rsv[4];
+	u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
+};
+
+#define HCLGEVF_RSS_TC_OFFSET_S		0
+#define HCLGEVF_RSS_TC_OFFSET_M		(0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
+#define HCLGEVF_RSS_TC_SIZE_S		12
+#define HCLGEVF_RSS_TC_SIZE_M		(0x7 << HCLGEVF_RSS_TC_SIZE_S)
+#define HCLGEVF_RSS_TC_VALID_B		15
+#define HCLGEVF_MAX_TC_NUM		8
+struct hclgevf_rss_tc_mode_cmd {
+	u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+	u8 rsv[8];
+};
+
+#define HCLGEVF_LINK_STS_B	0
+#define HCLGEVF_LINK_STATUS	BIT(HCLGEVF_LINK_STS_B)
+struct hclgevf_link_status_cmd {
+	u8 status;
+	u8 rsv[23];
+};
+
+#define HCLGEVF_RING_ID_MASK	0x3ff
+#define HCLGEVF_TQP_ENABLE_B	0
+
+struct hclgevf_cfg_com_tqp_queue_cmd {
+	__le16 tqp_id;
+	__le16 stream_id;
+	u8 enable;
+	u8 rsv[19];
+};
+
+struct hclgevf_cfg_tx_queue_pointer_cmd {
+	__le16 tqp_id;
+	__le16 tx_tail;
+	__le16 tx_head;
+	__le16 fbd_num;
+	__le16 ring_offset;
+	u8 rsv[14];
+};
+
+#define HCLGEVF_TSO_ENABLE_B	0
+struct hclgevf_cfg_tso_status_cmd {
+	u8 tso_enable;
+	u8 rsv[23];
+};
+
+#define HCLGEVF_TYPE_CRQ		0
+#define HCLGEVF_TYPE_CSQ		1
+#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG	0x27000
+#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG	0x27004
+#define HCLGEVF_NIC_CSQ_DEPTH_REG	0x27008
+#define HCLGEVF_NIC_CSQ_TAIL_REG	0x27010
+#define HCLGEVF_NIC_CSQ_HEAD_REG	0x27014
+#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG	0x27018
+#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG	0x2701c
+#define HCLGEVF_NIC_CRQ_DEPTH_REG	0x27020
+#define HCLGEVF_NIC_CRQ_TAIL_REG	0x27024
+#define HCLGEVF_NIC_CRQ_HEAD_REG	0x27028
+#define HCLGEVF_NIC_CMQ_EN_B		16
+#define HCLGEVF_NIC_CMQ_ENABLE		BIT(HCLGEVF_NIC_CMQ_EN_B)
+#define HCLGEVF_NIC_CMQ_DESC_NUM	1024
+#define HCLGEVF_NIC_CMQ_DESC_NUM_S	3
+#define HCLGEVF_NIC_CMDQ_INT_SRC_REG	0x27100
+
+static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+	writel(value, base + reg);
+}
+
+static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
+{
+	u8 __iomem *reg_addr = READ_ONCE(base);
+
+	return readl(reg_addr + reg);
+}
+
+#define hclgevf_write_dev(a, reg, value) \
+	hclgevf_write_reg((a)->io_base, (reg), (value))
+#define hclgevf_read_dev(a, reg) \
+	hclgevf_read_reg((a)->io_base, (reg))
+
+#define HCLGEVF_SEND_SYNC(flag) \
+	((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev);
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+				  enum hclgevf_opcode_type opcode,
+				  bool is_read);
+#endif

From b11a0bb231f3d83429c5e88451ca85ce27c4a9dd Mon Sep 17 00:00:00 2001
From: Salil Mehta
Date: Thu, 14 Dec 2017 18:03:03 +0000
Subject: net: hns3: Add mailbox support to VF driver

This patch adds mailbox support to the VF driver. The mailbox is used as
an interface to communicate with the PF driver for various purposes, such
as {set|get} MAC related operations, reset and link status. The mailbox
supports both synchronous and asynchronous sending of commands to the PF
driver.

Signed-off-by: Salil Mehta
Signed-off-by: lipeng
Signed-off-by: David S. Miller
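[Editor's note, to make the message format concrete: a VF-to-PF mailbox request carries its opcode in msg[0], its subcode in msg[1], and the payload from msg[2] onwards, which is exactly what hclgevf_send_mbx_msg() in this patch does. A standalone sketch of building a VLAN-filter request this way; set_vlan_msg() is a made-up helper, and the 0x04 opcode value follows the HCLGE_MBX_SET_VLAN position in the enum added below:]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MBX_MAX_MSG_SIZE 16 /* HCLGE_MBX_MAX_MSG_SIZE in the patch */

/* Build a HCLGE_MBX_SET_VLAN / HCLGE_MBX_VLAN_FILTER request the way
 * hclgevf_send_mbx_msg() does: msg[0]=code, msg[1]=subcode, payload after.
 */
static void set_vlan_msg(uint8_t msg[MBX_MAX_MSG_SIZE],
			 uint8_t is_kill, uint16_t vlan_id, uint16_t proto)
{
	memset(msg, 0, MBX_MAX_MSG_SIZE);
	msg[0] = 0x04;			/* code: HCLGE_MBX_SET_VLAN */
	msg[1] = 0;			/* subcode: HCLGE_MBX_VLAN_FILTER */
	msg[2] = is_kill;
	memcpy(&msg[3], &vlan_id, sizeof(vlan_id));
	memcpy(&msg[5], &proto, sizeof(proto));
}

int main(void)
{
	uint8_t msg[MBX_MAX_MSG_SIZE];
	int i;

	set_vlan_msg(msg, 0, 100, 0x8100 /* ETH_P_8021Q */);
	for (i = 0; i < MBX_MAX_MSG_SIZE; i++)
		printf("%02x ", msg[i]);
	putchar('\n');
	return 0;
}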
---
 drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h    |  88 ++++++++++
 .../ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c   | 181 +++++++++++++++++++++
 2 files changed, 269 insertions(+)
 create mode 100644 drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
 create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c

diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
new file mode 100644
index 000000000000..3e9203ea42a6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_MBX_H
+#define __HCLGE_MBX_H
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define HCLGE_MBX_VF_MSG_DATA_NUM	16
+
+enum HCLGE_MBX_OPCODE {
+	HCLGE_MBX_RESET = 0x01,		/* (VF -> PF) assert reset */
+	HCLGE_MBX_SET_UNICAST,		/* (VF -> PF) set UC addr */
+	HCLGE_MBX_SET_MULTICAST,	/* (VF -> PF) set MC addr */
+	HCLGE_MBX_SET_VLAN,		/* (VF -> PF) set VLAN */
+	HCLGE_MBX_MAP_RING_TO_VECTOR,	/* (VF -> PF) map ring-to-vector */
+	HCLGE_MBX_UNMAP_RING_TO_VECTOR,	/* (VF -> PF) unmap ring-to-vector */
+	HCLGE_MBX_SET_PROMISC_MODE,	/* (VF -> PF) set promiscuous mode */
+	HCLGE_MBX_SET_MACVLAN,		/* (VF -> PF) set unicast filter */
+	HCLGE_MBX_API_NEGOTIATE,	/* (VF -> PF) negotiate API version */
+	HCLGE_MBX_GET_QINFO,		/* (VF -> PF) get queue config */
+	HCLGE_MBX_GET_TCINFO,		/* (VF -> PF) get TC config */
+	HCLGE_MBX_GET_RETA,		/* (VF -> PF) get RETA */
+	HCLGE_MBX_GET_RSS_KEY,		/* (VF -> PF) get RSS key */
+	HCLGE_MBX_GET_MAC_ADDR,		/* (VF -> PF) get MAC addr */
+	HCLGE_MBX_PF_VF_RESP,		/* (PF -> VF) generate response to VF */
+	HCLGE_MBX_GET_BDNUM,		/* (VF -> PF) get BD num */
+	HCLGE_MBX_GET_BUFSIZE,		/* (VF -> PF) get buffer size */
+	HCLGE_MBX_GET_STREAMID,		/* (VF -> PF) get stream id */
+	HCLGE_MBX_SET_AESTART,		/* (VF -> PF) start ae */
+	HCLGE_MBX_SET_TSOSTATS,		/* (VF -> PF) set TSO stats */
+	HCLGE_MBX_LINK_STAT_CHANGE,	/* (PF -> VF) link status has changed */
+	HCLGE_MBX_GET_BASE_CONFIG,	/* (VF -> PF) get config */
+	HCLGE_MBX_BIND_FUNC_QUEUE,	/* (VF -> PF) bind function and queue */
+	HCLGE_MBX_GET_LINK_STATUS,	/* (VF -> PF) get link status */
+	HCLGE_MBX_QUEUE_RESET,		/* (VF -> PF) reset queue */
+};
+
+/* below are per-VF mac-vlan subcodes */
+enum hclge_mbx_mac_vlan_subcode {
+	HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0,	/* modify UC mac addr */
+	HCLGE_MBX_MAC_VLAN_UC_ADD,		/* add a new UC mac addr */
+	HCLGE_MBX_MAC_VLAN_UC_REMOVE,		/* remove a UC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_MODIFY,		/* modify MC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_ADD,		/* add new MC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_REMOVE,		/* remove MC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,	/* config func MTA enable */
+};
+
+/* below are per-VF vlan cfg subcodes */
+enum hclge_mbx_vlan_cfg_subcode {
+	HCLGE_MBX_VLAN_FILTER = 0,	/* set vlan filter */
+	HCLGE_MBX_VLAN_TX_OFF_CFG,	/* set tx side vlan offload */
+	HCLGE_MBX_VLAN_RX_OFF_CFG,	/* set rx side vlan offload */
+};
+
+#define HCLGE_MBX_MAX_MSG_SIZE		16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE	8
+
+struct hclgevf_mbx_resp_status {
+	struct mutex mbx_mutex; /* protects against contending sync cmd resp */
+	u32 origin_mbx_msg;
+	bool received_resp;
+	int resp_status;
+	u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
+struct hclge_mbx_vf_to_pf_cmd {
+	u8 rsv;
+	u8 mbx_src_vfid; /* Auto filled by IMP */
+	u8 rsv1[2];
+	u8 msg_len;
+	u8 rsv2[3];
+	u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
+};
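[Editor's note: the origin_mbx_msg field above packs the original request's code and subcode into a single u32 (code in the high 16 bits, subcode in the low 16) so a PF response can later be matched against the request waiting on it. A standalone model of the packing and matching; pack_origin()/origin_matches() are illustrative names and the matching here is the conceptual form, not a line-for-line copy of the driver:]

#include <stdint.h>
#include <stdio.h>

/* Pack request code/subcode the way hclgevf_mbx_handler() stores them:
 * high 16 bits = code, low 16 bits = subcode.
 */
static uint32_t pack_origin(uint16_t code, uint16_t subcode)
{
	return ((uint32_t)code << 16) | subcode;
}

/* Match a stored origin against the request we are waiting on, as
 * hclgevf_get_mbx_resp() does before accepting the response data.
 */
static int origin_matches(uint32_t origin, uint16_t code, uint16_t subcode)
{
	return (uint16_t)(origin >> 16) == code &&
	       (uint16_t)(origin & 0xffff) == subcode;
}

int main(void)
{
	uint32_t origin = pack_origin(0x0a /* HCLGE_MBX_GET_QINFO */, 0);

	printf("match: %d\n", origin_matches(origin, 0x0a, 0)); /* 1 */
	printf("match: %d\n", origin_matches(origin, 0x0b, 0)); /* 0 */
	return 0;
}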
+struct hclge_mbx_pf_to_vf_cmd {
+	u8 dest_vfid;
+	u8 rsv[3];
+	u8 msg_len;
+	u8 rsv1[3];
+	u16 msg[8];
+};
+
+#define hclge_mbx_ring_ptr_move_crq(crq) \
+	(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#endif

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
new file mode 100644
index 000000000000..e39cad285fa9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_mbx.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
+{
+	/* this function should be called with mbx_resp.mbx_mutex held
+	 * to protect received_resp from a race condition
+	 */
+	hdev->mbx_resp.received_resp = false;
+	hdev->mbx_resp.origin_mbx_msg = 0;
+	hdev->mbx_resp.resp_status = 0;
+	memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
+}
+
+/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a
+ * mailbox message to PF.
+ * @hdev: pointer to struct hclgevf_dev
+ * @code0: the message code of the original request
+ * @code1: the message subcode of the original request
+ * @resp_data: pointer to store the response data
+ * @resp_len: the length of the resp_data array
+ */
+static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+				u8 *resp_data, u16 resp_len)
+{
+#define HCLGEVF_MAX_TRY_TIMES	500
+#define HCLGEVF_SLEEP_USECOND	1000
+	struct hclgevf_mbx_resp_status *mbx_resp;
+	u16 r_code0, r_code1;
+	int i = 0;
+
+	if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+		dev_err(&hdev->pdev->dev,
+			"VF mbx response len(=%d) exceeds maximum(=%d)\n",
+			resp_len,
+			HCLGE_MBX_MAX_RESP_DATA_SIZE);
+		return -EINVAL;
+	}
+
+	while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+		udelay(HCLGEVF_SLEEP_USECOND);
+		i++;
+	}
+
+	if (i >= HCLGEVF_MAX_TRY_TIMES) {
+		dev_err(&hdev->pdev->dev,
+			"VF could not get mbx resp(=%d) from PF in %d tries\n",
+			hdev->mbx_resp.received_resp, i);
+		return -EIO;
+	}
+
+	mbx_resp = &hdev->mbx_resp;
+	r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
+	r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);
+	if (resp_data)
+		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
+
+	hclgevf_reset_mbx_resp_status(hdev);
+
+	if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
+		dev_err(&hdev->pdev->dev,
+			"VF could not match resp code(code0=%d, code1=%d), %d",
+			code0, code1, mbx_resp->resp_status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+			 const u8 *msg_data, u8 msg_len, bool need_resp,
+			 u8 *resp_data, u16 resp_len)
+{
+	struct hclge_mbx_vf_to_pf_cmd *req;
+	struct hclgevf_desc desc;
+	int status;
+
+	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+	/* first two bytes are reserved for code & subcode */
+	if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
+		dev_err(&hdev->pdev->dev,
+			"VF send mbx msg fail, msg len %d exceeds max len %d\n",
+			msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+		return -EINVAL;
+	}
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+	req->msg[0] = code;
+	req->msg[1] = subcode;
+	memcpy(&req->msg[2], msg_data, msg_len);
+
+	/* synchronous send */
+	if (need_resp) {
+		mutex_lock(&hdev->mbx_resp.mbx_mutex);
+		hclgevf_reset_mbx_resp_status(hdev);
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"VF failed(=%d) to send mbx message to PF\n",
+				status);
+			mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+			return status;
+		}
+
+		status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
+					      resp_len);
+		mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+	} else {
+		/* asynchronous send */
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"VF failed(=%d) to send mbx message to PF\n",
+				status);
+			return status;
+		}
+	}
+
+	return status;
+}
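[Editor's note: the synchronous path in hclgevf_get_mbx_resp()/hclgevf_send_mbx_msg() above busy-waits for the PF reply with a bounded number of polls. A standalone model of that wait-with-timeout pattern; wait_for_flag() and fake_tick() are made-up stand-ins for the real polling delay and the mailbox interrupt that flips the flag:]

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRIES 500 /* HCLGEVF_MAX_TRY_TIMES in the patch */

/* Poll a completion flag up to MAX_TRIES times, mirroring the loop in
 * hclgevf_get_mbx_resp(); tick() stands in for udelay() plus IRQ progress.
 */
static int wait_for_flag(volatile bool *flag, void (*tick)(void))
{
	int i = 0;

	while (!*flag && i < MAX_TRIES) {
		tick();
		i++;
	}
	return *flag ? 0 : -1; /* -EIO in the driver */
}

static bool resp_received;

static void fake_tick(void)
{
	static int n;

	if (++n == 3) /* pretend the mailbox IRQ fired on the 3rd poll */
		resp_received = true;
}

int main(void)
{
	printf("wait: %d\n", wait_for_flag(&resp_received, fake_tick));
	return 0;
}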
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_mbx_resp_status *resp;
+	struct hclge_mbx_pf_to_vf_cmd *req;
+	struct hclgevf_cmq_ring *crq;
+	struct hclgevf_desc *desc;
+	u16 link_status, flag;
+	u8 *temp;
+	int i;
+
+	resp = &hdev->mbx_resp;
+	crq = &hdev->hw.cmq.crq;
+
+	flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+	while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) {
+		desc = &crq->desc[crq->next_to_use];
+		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+
+		switch (req->msg[0]) {
+		case HCLGE_MBX_PF_VF_RESP:
+			if (resp->received_resp)
+				dev_warn(&hdev->pdev->dev,
+					 "VF mbx resp flag not clear(%d)\n",
+					 req->msg[1]);
+			resp->received_resp = true;
+
+			resp->origin_mbx_msg = (req->msg[1] << 16);
+			resp->origin_mbx_msg |= req->msg[2];
+			resp->resp_status = req->msg[3];
+
+			temp = (u8 *)&req->msg[4];
+			for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
+				resp->additional_info[i] = *temp;
+				temp++;
+			}
+			break;
+		case HCLGE_MBX_LINK_STAT_CHANGE:
+			link_status = le16_to_cpu(req->msg[1]);
+
+			/* update upper layer with new link status */
+			hclgevf_update_link_status(hdev, link_status);
+
+			break;
+		default:
+			dev_err(&hdev->pdev->dev,
+				"VF received unsupported(%d) mbx msg from PF\n",
+				req->msg[0]);
+			break;
+		}
+		hclge_mbx_ring_ptr_move_crq(crq);
+		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+	}
+
+	/* Write back the CMDQ CRQ head pointer; the M7 needs this pointer */
+	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+			  crq->next_to_use);
+}

From e2cb1dec9779ba2d89302a653eb0abaeb8682196 Mon Sep 17 00:00:00 2001
From: Salil Mehta
Date: Thu, 14 Dec 2017 18:03:04 +0000
Subject: net: hns3: Add HNS3 VF HCL(Hardware Compatibility Layer) Support

This patch adds support for the hardware compatibility layer to the HNS3
VF driver. This layer implements the various {set|get} operations on the
MAC address of a virtual port, RSS related configuration, fetching of
link status information from the PF, various VLAN related configuration
on the virtual port, querying of statistics from the hardware, etc.

This layer can interact with the hardware directly through the IMP
(Integrated Management Processor) interface, or it can use the mailbox
to interact with the PF driver.

Signed-off-by: Salil Mehta
Signed-off-by: lipeng
Signed-off-by: David S. Miller
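[Editor's note: much of the layer added below reduces to filling fixed-layout command structures and sending them, either directly to the IMP or through the mailbox. As one small example, the default RSS indirection table set up by hclgevf_rss_init_hw() simply spreads entries round-robin across the queues. A standalone model; the table size here is an assumed sample value, the real one comes from HCLGEVF_RSS_IND_TBL_SIZE in hclgevf_main.h:]

#include <stdio.h>

#define IND_TBL_SIZE 256 /* stand-in for HCLGEVF_RSS_IND_TBL_SIZE */

int main(void)
{
	unsigned char tbl[IND_TBL_SIZE];
	int rss_size = 4; /* hdev->rss_size_max for a small VF */
	int i;

	/* same round-robin fill as hclgevf_rss_init_hw() */
	for (i = 0; i < IND_TBL_SIZE; i++)
		tbl[i] = i % rss_size;

	/* every queue 0..rss_size-1 appears IND_TBL_SIZE/rss_size times */
	printf("tbl[0..5] = %d %d %d %d %d %d\n",
	       tbl[0], tbl[1], tbl[2], tbl[3], tbl[4], tbl[5]);
	return 0;
}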
---
 .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1489 ++++++++++++++++++++
 .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h |  164 +++
 2 files changed, 1653 insertions(+)
 create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
 create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
new file mode 100644
index 000000000000..f5a2a69f8249
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -0,0 +1,1489 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+#define HCLGEVF_NAME	"hclgevf"
+
+static struct hnae3_ae_algo ae_algovf;
+
+static const struct pci_device_id ae_algovf_pci_tbl[] = {
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+	/* required last entry */
+	{0, }
+};
+
+static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
+	struct hnae3_handle *handle)
+{
+	return container_of(handle, struct hclgevf_dev, nic);
+}
+
+static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_queue *queue;
+	struct hclgevf_desc desc;
+	struct hclgevf_tqp *tqp;
+	int status;
+	int i;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		queue = handle->kinfo.tqp[i];
+		tqp = container_of(queue, struct hclgevf_tqp, q);
+		hclgevf_cmd_setup_basic_desc(&desc,
+					     HCLGEVF_OPC_QUERY_RX_STATUS,
+					     true);
+
+		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"Query tqp stat fail, status = %d, queue = %d\n",
+				status, i);
+			return status;
+		}
+		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+			le32_to_cpu(desc.data[4]);
+
+		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
+					     true);
+
+		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"Query tqp stat fail, status = %d, queue = %d\n",
+				status, i);
+			return status;
+		}
+		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+			le32_to_cpu(desc.data[4]);
+	}
+
+	return 0;
+}
+
+static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_tqp *tqp;
+	u64 *buff = data;
+	int i;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+	}
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+	}
+
+	return buff;
+}
+
+static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hdev->num_tqps * 2;
+}
+
+static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 *buff = data;
+	int i = 0;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+						       struct hclgevf_tqp, q);
+		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+			 tqp->index);
+		buff += ETH_GSTRING_LEN;
+	}
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+						       struct hclgevf_tqp, q);
+		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+			 tqp->index);
+		buff += ETH_GSTRING_LEN;
+	}
+
+	return buff;
+}
+
+static void hclgevf_update_stats(struct hnae3_handle *handle,
+				 struct net_device_stats *net_stats)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int status;
+
+	status = hclgevf_tqps_update_stats(handle);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"VF update of TQPS stats fail, status = %d.\n",
+			status);
+}
+
+static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
+{ + if (strset == ETH_SS_TEST) + return -EOPNOTSUPP; + else if (strset == ETH_SS_STATS) + return hclgevf_tqps_get_sset_count(handle, strset); + + return 0; +} + +static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, + u8 *data) +{ + u8 *p = (char *)data; + + if (strset == ETH_SS_STATS) + p = hclgevf_tqps_get_strings(handle, p); +} + +static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) +{ + hclgevf_tqps_get_stats(handle, data); +} + +static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) +{ + u8 resp_msg; + int status; + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, + true, &resp_msg, sizeof(u8)); + if (status) { + dev_err(&hdev->pdev->dev, + "VF request to get TC info from PF failed %d", + status); + return status; + } + + hdev->hw_tc_map = resp_msg; + + return 0; +} + +static int hclge_get_queue_info(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_TQPS_RSS_INFO_LEN 8 + u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; + int status; + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, + true, resp_msg, + HCLGEVF_TQPS_RSS_INFO_LEN); + if (status) { + dev_err(&hdev->pdev->dev, + "VF request to get tqp info from PF failed %d", + status); + return status; + } + + memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); + memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); + memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); + memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); + + return 0; +} + +static int hclgevf_enable_tso(struct hclgevf_dev *hdev, int enable) +{ + struct hclgevf_cfg_tso_status_cmd *req; + struct hclgevf_desc desc; + + req = (struct hclgevf_cfg_tso_status_cmd *)desc.data; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_TSO_GENERIC_CONFIG, + false); + hnae_set_bit(req->tso_enable, HCLGEVF_TSO_ENABLE_B, enable); + + return hclgevf_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) +{ + struct hclgevf_tqp *tqp; + int i; + + hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, + sizeof(struct hclgevf_tqp), GFP_KERNEL); + if (!hdev->htqp) + return -ENOMEM; + + tqp = hdev->htqp; + + for (i = 0; i < hdev->num_tqps; i++) { + tqp->dev = &hdev->pdev->dev; + tqp->index = i; + + tqp->q.ae_algo = &ae_algovf; + tqp->q.buf_size = hdev->rx_buf_len; + tqp->q.desc_num = hdev->num_desc; + tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + + i * HCLGEVF_TQP_REG_SIZE; + + tqp++; + } + + return 0; +} + +static int hclgevf_knic_setup(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + struct hnae3_knic_private_info *kinfo; + u16 new_tqps = hdev->num_tqps; + int i; + + kinfo = &nic->kinfo; + kinfo->num_tc = 0; + kinfo->num_desc = hdev->num_desc; + kinfo->rx_buf_len = hdev->rx_buf_len; + for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i)) + kinfo->num_tc++; + + kinfo->rss_size + = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); + new_tqps = kinfo->rss_size * kinfo->num_tc; + kinfo->num_tqps = min(new_tqps, hdev->num_tqps); + + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, + sizeof(struct hnae3_queue *), GFP_KERNEL); + if (!kinfo->tqp) + return -ENOMEM; + + for (i = 0; i < kinfo->num_tqps; i++) { + hdev->htqp[i].q.handle = &hdev->nic; + hdev->htqp[i].q.tqp_index = i; + kinfo->tqp[i] = &hdev->htqp[i].q; + } + + return 0; +} + +static void hclgevf_request_link_info(struct hclgevf_dev *hdev) +{ + int status; + u8 resp_msg; + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, + 
0, false, &resp_msg, sizeof(u8)); + if (status) + dev_err(&hdev->pdev->dev, + "VF failed to fetch link status(%d) from PF", status); +} + +void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) +{ + struct hnae3_handle *handle = &hdev->nic; + struct hnae3_client *client; + + client = handle->client; + + if (link_state != hdev->hw.mac.link) { + client->ops->link_status_change(handle, !!link_state); + hdev->hw.mac.link = link_state; + } +} + +static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + int ret; + + nic->ae_algo = &ae_algovf; + nic->pdev = hdev->pdev; + nic->numa_node_mask = hdev->numa_node_mask; + + if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { + dev_err(&hdev->pdev->dev, "unsupported device type %d\n", + hdev->ae_dev->dev_type); + return -EINVAL; + } + + ret = hclgevf_knic_setup(hdev); + if (ret) + dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", + ret); + return ret; +} + +static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) +{ + hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; + hdev->num_msi_left += 1; + hdev->num_msi_used -= 1; +} + +static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_vector_info *vector = vector_info; + int alloc = 0; + int i, j; + + vector_num = min(hdev->num_msi_left, vector_num); + + for (j = 0; j < vector_num; j++) { + for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { + if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { + vector->vector = pci_irq_vector(hdev->pdev, i); + vector->io_addr = hdev->hw.io_base + + HCLGEVF_VECTOR_REG_BASE + + (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; + hdev->vector_status[i] = 0; + hdev->vector_irq[i] = vector->vector; + + vector++; + alloc++; + + break; + } + } + } + hdev->num_msi_left -= alloc; + hdev->num_msi_used += alloc; + + return alloc; +} + +static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) +{ + int i; + + for (i = 0; i < hdev->num_msi; i++) + if (vector == hdev->vector_irq[i]) + return i; + + return -EINVAL; +} + +static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) +{ + return HCLGEVF_RSS_KEY_SIZE; +} + +static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) +{ + return HCLGEVF_RSS_IND_TBL_SIZE; +} + +static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) +{ + const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; + struct hclgevf_rss_indirection_table_cmd *req; + struct hclgevf_desc desc; + int status; + int i, j; + + req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; + + for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, + false); + req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; + req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; + for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) + req->rss_result[j] = + indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "VF failed(=%d) to set RSS indirection table\n", + status); + return status; + } + } + + return 0; +} + +static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) +{ + struct hclgevf_rss_tc_mode_cmd *req; + u16 tc_offset[HCLGEVF_MAX_TC_NUM]; + u16 tc_valid[HCLGEVF_MAX_TC_NUM]; + u16 tc_size[HCLGEVF_MAX_TC_NUM]; + struct hclgevf_desc desc; + u16 roundup_size; + int status; + 
int i; + + req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + + for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { + tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); + tc_size[i] = roundup_size; + tc_offset[i] = rss_size * i; + } + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); + for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { + hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, + (tc_valid[i] & 0x1)); + hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, + HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); + hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, + HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); + } + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "VF failed(=%d) to set rss tc mode\n", status); + + return status; +} + +static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash, + u8 *key) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_config_cmd *req; + int lkup_times = key ? 3 : 1; + struct hclgevf_desc desc; + int key_offset; + int key_size; + int status; + + req = (struct hclgevf_rss_config_cmd *)desc.data; + lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0); + + for (key_offset = 0; key_offset < lkup_times; key_offset++) { + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_RSS_GENERIC_CONFIG, + true); + req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET); + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "failed to get hardware RSS cfg, status = %d\n", + status); + return status; + } + + if (key_offset == 2) + key_size = + HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; + else + key_size = HCLGEVF_RSS_HASH_KEY_NUM; + + if (key) + memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, + req->hash_key, + key_size); + } + + if (hash) { + if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ) + *hash = ETH_RSS_HASH_TOP; + else + *hash = ETH_RSS_HASH_UNKNOWN; + } + + return 0; +} + +static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + int i; + + if (indir) + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) + indir[i] = rss_cfg->rss_indirection_tbl[i]; + + return hclgevf_get_rss_hw_cfg(handle, hfunc, key); +} + +static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + int i; + + /* update the shadow RSS table with user specified qids */ + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) + rss_cfg->rss_indirection_tbl[i] = indir[i]; + + /* update the hardware */ + return hclgevf_set_rss_indir_table(hdev); +} + +static int hclgevf_get_tc_size(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + + return rss_cfg->rss_size; +} + +static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, + int vector, + struct hnae3_ring_chain_node *ring_chain) +{ +#define HCLGEVF_RING_NODE_VARIABLE_NUM 3 +#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM 3 + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_ring_chain_node *node; + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclgevf_desc desc; + int i, 
vector_id; + int status; + u8 type; + + req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); + type = en ? + HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR; + req->msg[0] = type; + req->msg[1] = vector_id; /* vector_id should be id in VF */ + + i = 0; + for (node = ring_chain; node; node = node->next) { + i++; + /* msg[2] is cause num */ + req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] = + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B); + req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] = + node->tqp_index; + if (i == (HCLGE_MBX_VF_MSG_DATA_NUM - + HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) / + HCLGEVF_RING_NODE_VARIABLE_NUM) { + req->msg[2] = i; + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", + status); + return status; + } + i = 0; + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_MBX_VF_TO_PF, + false); + req->msg[0] = type; + req->msg[1] = vector_id; + } + } + + if (i > 0) { + req->msg[2] = i; + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", status); + return status; + } + } + + return 0; +} + +static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain); +} + +static int hclgevf_unmap_ring_from_vector( + struct hnae3_handle *handle, + int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret, vector_id; + + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain); + if (ret) { + dev_err(&handle->pdev->dev, + "Unmap ring from vector fail. 
vector=%d, ret =%d\n", + vector_id, + ret); + return ret; + } + + hclgevf_free_vector(hdev, vector); + + return 0; +} + +static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) +{ + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclgevf_desc desc; + int status; + + req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); + req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; + req->msg[1] = en; + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Set promisc mode fail, status is %d.\n", status); + + return status; +} + +static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + hclgevf_cmd_set_promisc_mode(hdev, en); +} + +static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, + int stream_id, bool enable) +{ + struct hclgevf_cfg_com_tqp_queue_cmd *req; + struct hclgevf_desc desc; + int status; + + req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, + false); + req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); + req->stream_id = cpu_to_le16(stream_id); + req->enable |= enable << HCLGEVF_TQP_ENABLE_B; + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "TQP enable fail, status =%d.\n", status); + + return status; +} + +static int hclgevf_get_queue_id(struct hnae3_queue *queue) +{ + struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q); + + return tqp->index; +} + +static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_queue *queue; + struct hclgevf_tqp *tqp; + int i; + + for (i = 0; i < hdev->num_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclgevf_tqp, q); + memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); + } +} + +static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg[2] = {0}; + + msg[0] = en; + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, + msg, 1, false, NULL, 0); +} + +static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + ether_addr_copy(p, hdev->hw.mac.mac_addr); +} + +static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; + u8 *new_mac_addr = (u8 *)p; + u8 msg_data[ETH_ALEN * 2]; + int status; + + ether_addr_copy(msg_data, new_mac_addr); + ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, + HCLGE_MBX_MAC_VLAN_UC_MODIFY, + msg_data, ETH_ALEN * 2, + false, NULL, 0); + if (!status) + ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); + + return status; +} + +static int hclgevf_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, + HCLGE_MBX_MAC_VLAN_UC_ADD, + addr, ETH_ALEN, false, NULL, 0); +} + +static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return 
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, + HCLGE_MBX_MAC_VLAN_UC_REMOVE, + addr, ETH_ALEN, false, NULL, 0); +} + +static int hclgevf_add_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MC_ADD, + addr, ETH_ALEN, false, NULL, 0); +} + +static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MC_REMOVE, + addr, ETH_ALEN, false, NULL, 0); +} + +static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, + __be16 proto, u16 vlan_id, + bool is_kill) +{ +#define HCLGEVF_VLAN_MBX_MSG_LEN 5 + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; + + if (vlan_id > 4095) + return -EINVAL; + + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + msg_data[0] = is_kill; + memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); + memcpy(&msg_data[3], &proto, sizeof(proto)); + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_VLAN_FILTER, msg_data, + HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); +} + +static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg_data[2]; + + memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); + + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false, + NULL, 0); +} + +static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->fw_version; +} + +static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) +{ + struct hclgevf_misc_vector *vector = &hdev->misc_vector; + + vector->vector_irq = pci_irq_vector(hdev->pdev, + HCLGEVF_MISC_VECTOR_NUM); + vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; + /* vector status always valid for Vector 0 */ + hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; + hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; + + hdev->num_msi_left -= 1; + hdev->num_msi_used += 1; +} + +static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) +{ + if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) + schedule_work(&hdev->mbx_service_task); +} + +static void hclgevf_task_schedule(struct hclgevf_dev *hdev) +{ + if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && + !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) + schedule_work(&hdev->service_task); +} + +static void hclgevf_service_timer(struct timer_list *t) +{ + struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); + + mod_timer(&hdev->service_timer, jiffies + 5 * HZ); + + hclgevf_task_schedule(hdev); +} + +static void hclgevf_mailbox_service_task(struct work_struct *work) +{ + struct hclgevf_dev *hdev; + + hdev = container_of(work, struct hclgevf_dev, mbx_service_task); + + if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) + return; + + clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); + + hclgevf_mbx_handler(hdev); + + clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclgevf_service_task(struct work_struct *work) +{ + struct hclgevf_dev *hdev; + + hdev = container_of(work, struct hclgevf_dev, service_task); + + /* request the link status from the PF. 
PF would be able to tell VF + * about such updates in future so we might remove this later + */ + hclgevf_request_link_info(hdev); + + clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); +} + +static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) +{ + hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); +} + +static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval) +{ + u32 cmdq_src_reg; + + /* fetch the events from their corresponding regs */ + cmdq_src_reg = hclgevf_read_dev(&hdev->hw, + HCLGEVF_VECTOR0_CMDQ_SRC_REG); + + /* check for vector0 mailbox(=CMDQ RX) event source */ + if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { + cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); + *clearval = cmdq_src_reg; + return true; + } + + dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); + + return false; +} + +static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) +{ + writel(en ? 1 : 0, vector->addr); +} + +static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) +{ + struct hclgevf_dev *hdev = data; + u32 clearval; + + hclgevf_enable_vector(&hdev->misc_vector, false); + if (!hclgevf_check_event_cause(hdev, &clearval)) + goto skip_sched; + + /* schedule the VF mailbox service task, if not already scheduled */ + hclgevf_mbx_task_schedule(hdev); + + hclgevf_clear_event_cause(hdev, clearval); + +skip_sched: + hclgevf_enable_vector(&hdev->misc_vector, true); + + return IRQ_HANDLED; +} + +static int hclgevf_configure(struct hclgevf_dev *hdev) +{ + int ret; + + /* get queue configuration from PF */ + ret = hclge_get_queue_info(hdev); + if (ret) + return ret; + /* get tc configuration from PF */ + return hclgevf_get_tc_info(hdev); +} + +static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *roce = &hdev->roce; + struct hnae3_handle *nic = &hdev->nic; + + roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM; + + if (hdev->num_msi_left < roce->rinfo.num_vectors || + hdev->num_msi_left == 0) + return -EINVAL; + + roce->rinfo.base_vector = + hdev->vector_status[hdev->num_msi_used]; + + roce->rinfo.netdev = nic->kinfo.netdev; + roce->rinfo.roce_io_base = hdev->hw.io_base; + + roce->pdev = nic->pdev; + roce->ae_algo = nic->ae_algo; + roce->numa_node_mask = nic->numa_node_mask; + + return 0; +} + +static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) +{ + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + int i, ret; + + rss_cfg->rss_size = hdev->rss_size_max; + + /* Initialize RSS indirect table for each vport */ + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) + rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; + + ret = hclgevf_set_rss_indir_table(hdev); + if (ret) + return ret; + + return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); +} + +static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) +{ + /* other vlan config(like, VLAN TX/RX offload) would also be added + * here later + */ + return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, + false); +} + +static int hclgevf_ae_start(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int i, queue_id; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + /* ring enable */ + queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); + if (queue_id < 0) { + dev_warn(&hdev->pdev->dev, + "Get invalid queue id, ignore it\n"); + continue; + } + + hclgevf_tqp_enable(hdev, queue_id, 0, true); + } + + /* reset tqp stats */ + 
hclgevf_reset_tqp_stats(handle); + + hclgevf_request_link_info(hdev); + + clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); + mod_timer(&hdev->service_timer, jiffies + HZ); + + return 0; +} + +static void hclgevf_ae_stop(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int i, queue_id; + + for (i = 0; i < hdev->num_tqps; i++) { + /* Ring disable */ + queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); + if (queue_id < 0) { + dev_warn(&hdev->pdev->dev, + "Get invalid queue id, ignore it\n"); + continue; + } + + hclgevf_tqp_enable(hdev, queue_id, 0, false); + } + + /* reset tqp stats */ + hclgevf_reset_tqp_stats(handle); +} + +static void hclgevf_state_init(struct hclgevf_dev *hdev) +{ + /* setup tasks for the MBX */ + INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); + clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); + + /* setup tasks for service timer */ + timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); + + INIT_WORK(&hdev->service_task, hclgevf_service_task); + clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); + + mutex_init(&hdev->mbx_resp.mbx_mutex); + + /* bring the device down */ + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); +} + +static void hclgevf_state_uninit(struct hclgevf_dev *hdev) +{ + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + + if (hdev->service_timer.function) + del_timer_sync(&hdev->service_timer); + if (hdev->service_task.func) + cancel_work_sync(&hdev->service_task); + if (hdev->mbx_service_task.func) + cancel_work_sync(&hdev->mbx_service_task); + + mutex_destroy(&hdev->mbx_resp.mbx_mutex); +} + +static int hclgevf_init_msi(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int vectors; + int i; + + hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM; + + vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (vectors < 0) { + dev_err(&pdev->dev, + "failed(%d) to allocate MSI/MSI-X vectors\n", + vectors); + return vectors; + } + if (vectors < hdev->num_msi) + dev_warn(&hdev->pdev->dev, + "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", + hdev->num_msi, vectors); + + hdev->num_msi = vectors; + hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; + + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) { + pci_free_irq_vectors(pdev); + return -ENOMEM; + } + + for (i = 0; i < hdev->num_msi; i++) + hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; + + hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(int), GFP_KERNEL); + if (!hdev->vector_irq) { + pci_free_irq_vectors(pdev); + return -ENOMEM; + } + + return 0; +} + +static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + pci_free_irq_vectors(pdev); +} + +static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) +{ + int ret = 0; + + hclgevf_get_misc_vector(hdev); + + ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, + 0, "hclgevf_cmd", hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", + hdev->misc_vector.vector_irq); + return ret; + } + + /* enable misc. 
vector(vector 0) */ + hclgevf_enable_vector(&hdev->misc_vector, true); + + return ret; +} + +static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) +{ + /* disable misc vector(vector 0) */ + hclgevf_enable_vector(&hdev->misc_vector, false); + free_irq(hdev->misc_vector.vector_irq, hdev); + hclgevf_free_vector(hdev, 0); +} + +static int hclgevf_init_instance(struct hclgevf_dev *hdev, + struct hnae3_client *client) +{ + int ret; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + hdev->nic_client = client; + hdev->nic.client = client; + + ret = client->ops->init_instance(&hdev->nic); + if (ret) + return ret; + + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { + struct hnae3_client *rc = hdev->roce_client; + + ret = hclgevf_init_roce_base_info(hdev); + if (ret) + return ret; + ret = rc->ops->init_instance(&hdev->roce); + if (ret) + return ret; + } + break; + case HNAE3_CLIENT_UNIC: + hdev->nic_client = client; + hdev->nic.client = client; + + ret = client->ops->init_instance(&hdev->nic); + if (ret) + return ret; + break; + case HNAE3_CLIENT_ROCE: + hdev->roce_client = client; + hdev->roce.client = client; + + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { + ret = hclgevf_init_roce_base_info(hdev); + if (ret) + return ret; + + ret = client->ops->init_instance(&hdev->roce); + if (ret) + return ret; + } + } + + return 0; +} + +static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, + struct hnae3_client *client) +{ + /* un-init roce, if it exists */ + if (hdev->roce_client) + hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); + + /* un-init nic/unic, if this was not called by roce client */ + if ((client->ops->uninit_instance) && + (client->type != HNAE3_CLIENT_ROCE)) + client->ops->uninit_instance(&hdev->nic, 0); +} + +static int hclgevf_register_client(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + return hclgevf_init_instance(hdev, client); +} + +static void hclgevf_unregister_client(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + hclgevf_uninit_instance(hdev, client); +} + +static int hclgevf_pci_init(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclgevf_hw *hw; + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "failed to enable PCI device\n"); + goto err_no_drvdata; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n"); + goto err_disable_device; + } + + ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); + goto err_disable_device; + } + + pci_set_master(pdev); + hw = &hdev->hw; + hw->hdev = hdev; + hw->io_base = pci_iomap(pdev, 2, 0); + if (!hw->io_base) { + dev_err(&pdev->dev, "can't map configuration register space\n"); + ret = -ENOMEM; + goto err_clr_master; + } + + return 0; + +err_clr_master: + pci_clear_master(pdev); + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_no_drvdata: + pci_set_drvdata(pdev, NULL); + return ret; +} + +static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + pci_iounmap(pdev, hdev->hw.io_base); + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct
pci_dev *pdev = ae_dev->pdev; + struct hclgevf_dev *hdev; + int ret; + + hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + hdev->pdev = pdev; + hdev->ae_dev = ae_dev; + ae_dev->priv = hdev; + + ret = hclgevf_pci_init(hdev); + if (ret) { + dev_err(&pdev->dev, "PCI initialization failed\n"); + return ret; + } + + ret = hclgevf_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); + goto err_irq_init; + } + + hclgevf_state_init(hdev); + + ret = hclgevf_misc_irq_init(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", + ret); + goto err_misc_irq_init; + } + + ret = hclgevf_cmd_init(hdev); + if (ret) + goto err_cmd_init; + + ret = hclgevf_configure(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); + goto err_config; + } + + ret = hclgevf_alloc_tqps(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); + goto err_config; + } + + ret = hclgevf_set_handle_info(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); + goto err_config; + } + + ret = hclgevf_enable_tso(hdev, true); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to enable tso\n", ret); + goto err_config; + } + + /* Initialize VF's MTA */ + hdev->accept_mta_mc = true; + ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to set mta filter mode\n", ret); + goto err_config; + } + + /* Initialize RSS for this VF */ + ret = hclgevf_rss_init_hw(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize RSS\n", ret); + goto err_config; + } + + ret = hclgevf_init_vlan_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize VLAN config\n", ret); + goto err_config; + } + + pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); + + return 0; + +err_config: + hclgevf_cmd_uninit(hdev); +err_cmd_init: + hclgevf_misc_irq_uninit(hdev); +err_misc_irq_init: + hclgevf_state_uninit(hdev); + hclgevf_uninit_msi(hdev); +err_irq_init: + hclgevf_pci_uninit(hdev); + return ret; +} + +static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + hclgevf_cmd_uninit(hdev); + hclgevf_misc_irq_uninit(hdev); + hclgevf_state_uninit(hdev); + hclgevf_uninit_msi(hdev); + hclgevf_pci_uninit(hdev); + ae_dev->priv = NULL; +} + +static const struct hnae3_ae_ops hclgevf_ops = { + .init_ae_dev = hclgevf_init_ae_dev, + .uninit_ae_dev = hclgevf_uninit_ae_dev, + .init_client_instance = hclgevf_register_client, + .uninit_client_instance = hclgevf_unregister_client, + .start = hclgevf_ae_start, + .stop = hclgevf_ae_stop, + .map_ring_to_vector = hclgevf_map_ring_to_vector, + .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, + .get_vector = hclgevf_get_vector, + .reset_queue = hclgevf_reset_tqp, + .set_promisc_mode = hclgevf_set_promisc_mode, + .get_mac_addr = hclgevf_get_mac_addr, + .set_mac_addr = hclgevf_set_mac_addr, + .add_uc_addr = hclgevf_add_uc_addr, + .rm_uc_addr = hclgevf_rm_uc_addr, + .add_mc_addr = hclgevf_add_mc_addr, + .rm_mc_addr = hclgevf_rm_mc_addr, + .get_stats = hclgevf_get_stats, + .update_stats = hclgevf_update_stats, + .get_strings = hclgevf_get_strings, + .get_sset_count = hclgevf_get_sset_count, + .get_rss_key_size = hclgevf_get_rss_key_size, + .get_rss_indir_size = hclgevf_get_rss_indir_size, + .get_rss = hclgevf_get_rss, + .set_rss = hclgevf_set_rss, + .get_tc_size = 
hclgevf_get_tc_size, + .get_fw_version = hclgevf_get_fw_version, + .set_vlan_filter = hclgevf_set_vlan_filter, +}; + +static struct hnae3_ae_algo ae_algovf = { + .ops = &hclgevf_ops, + .name = HCLGEVF_NAME, + .pdev_id_table = ae_algovf_pci_tbl, +}; + +static int hclgevf_init(void) +{ + pr_info("%s is initializing\n", HCLGEVF_NAME); + + return hnae3_register_ae_algo(&ae_algovf); +} + +static void hclgevf_exit(void) +{ + hnae3_unregister_ae_algo(&ae_algovf); +} +module_init(hclgevf_init); +module_exit(hclgevf_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_DESCRIPTION("HCLGEVF Driver"); +MODULE_VERSION(HCLGEVF_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h new file mode 100644 index 000000000000..a63bee4a3674 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#ifndef __HCLGEVF_MAIN_H +#define __HCLGEVF_MAIN_H +#include +#include +#include "hclge_mbx.h" +#include "hclgevf_cmd.h" +#include "hnae3.h" + +#define HCLGEVF_MOD_VERSION "v1.0" +#define HCLGEVF_DRIVER_NAME "hclgevf" + +#define HCLGEVF_ROCEE_VECTOR_NUM 0 +#define HCLGEVF_MISC_VECTOR_NUM 0 + +#define HCLGEVF_INVALID_VPORT 0xffff + +/* This number in actual depends upon the total number of VFs + * created by physical function. But the maximum number of + * possible vector-per-VF is {VFn(1-32), VECTn(32 + 1)}. + */ +#define HCLGEVF_MAX_VF_VECTOR_NUM (32 + 1) + +#define HCLGEVF_VECTOR_REG_BASE 0x20000 +#define HCLGEVF_MISC_VECTOR_REG_BASE 0x20400 +#define HCLGEVF_VECTOR_REG_OFFSET 0x4 +#define HCLGEVF_VECTOR_VF_OFFSET 0x100000 + +/* Vector0 interrupt CMDQ event source register(RW) */ +#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100 +/* CMDQ register bits for RX event(=MBX event) */ +#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 + +#define HCLGEVF_TQP_RESET_TRY_TIMES 10 + +#define HCLGEVF_RSS_IND_TBL_SIZE 512 +#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff +#define HCLGEVF_RSS_KEY_SIZE 40 +#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0 +#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1 +#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2 +#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf +#define HCLGEVF_RSS_CFG_TBL_NUM \ + (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE) + +/* states of hclgevf device & tasks */ +enum hclgevf_states { + /* device states */ + HCLGEVF_STATE_DOWN, + HCLGEVF_STATE_DISABLED, + /* task states */ + HCLGEVF_STATE_SERVICE_SCHED, + HCLGEVF_STATE_MBX_SERVICE_SCHED, + HCLGEVF_STATE_MBX_HANDLING, +}; + +#define HCLGEVF_MPF_ENBALE 1 + +struct hclgevf_mac { + u8 mac_addr[ETH_ALEN]; + int link; +}; + +struct hclgevf_hw { + void __iomem *io_base; + int num_vec; + struct hclgevf_cmq cmq; + struct hclgevf_mac mac; + void *hdev; /* hchgevf device it is part of */ +}; + +/* TQP stats */ +struct hlcgevf_tqp_stats { + /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */ + u64 rcb_tx_ring_pktnum_rcd; /* 32bit */ + /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */ + u64 rcb_rx_ring_pktnum_rcd; /* 32bit */ +}; + +struct hclgevf_tqp { + struct device *dev; /* device for DMA mapping */ + struct hnae3_queue q; + struct hlcgevf_tqp_stats tqp_stats; + u16 index; /* global index in a NIC controller */ + + bool alloced; +}; + +struct hclgevf_cfg { + u8 vmdq_vport_num; + u8 tc_num; + u16 tqp_desc_num; + u16 rx_buf_len; + u8 phy_addr; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u32 numa_node_map; +}; + +struct 
hclgevf_rss_cfg { + u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */ + u32 hash_algo; + u32 rss_size; + u8 hw_tc_map; + u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */ +}; + +struct hclgevf_misc_vector { + u8 __iomem *addr; + int vector_irq; +}; + +struct hclgevf_dev { + struct pci_dev *pdev; + struct hnae3_ae_dev *ae_dev; + struct hclgevf_hw hw; + struct hclgevf_misc_vector misc_vector; + struct hclgevf_rss_cfg rss_cfg; + unsigned long state; + + u32 fw_version; + u16 num_tqps; /* num task queue pairs of this PF */ + + u16 alloc_rss_size; /* allocated RSS task queue */ + u16 rss_size_max; /* HW defined max RSS task queue */ + + u16 num_alloc_vport; /* num vports this driver supports */ + u32 numa_node_mask; + u16 rx_buf_len; + u16 num_desc; + u8 hw_tc_map; + + u16 num_msi; + u16 num_msi_left; + u16 num_msi_used; + u32 base_msi_vector; + u16 *vector_status; + int *vector_irq; + + bool accept_mta_mc; /* whether to accept mta filter multicast */ + struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ + + struct timer_list service_timer; + struct work_struct service_task; + struct work_struct mbx_service_task; + + struct hclgevf_tqp *htqp; + + struct hnae3_handle nic; + struct hnae3_handle roce; + + struct hnae3_client *nic_client; + struct hnae3_client *roce_client; + u32 flag; +}; + +int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, + const u8 *msg_data, u8 msg_len, bool need_resp, + u8 *resp_data, u16 resp_len); +void hclgevf_mbx_handler(struct hclgevf_dev *hdev); +void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state); +#endif -- cgit From e963cb789a29b890678b58ef7da5d7c497510b7e Mon Sep 17 00:00:00 2001 From: Salil Mehta Date: Thu, 14 Dec 2017 18:03:05 +0000 Subject: net: hns3: Add HNS3 VF driver to kernel build framework This patch introduces the new Makefiles and updates existing Makefiles required to build the HNS3 Virtual Function driver. This also updates the Kconfig for introduction of new menuconfig entries related to VF driver. Signed-off-by: Salil Mehta Signed-off-by: lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/Kconfig | 28 +++++++++++++++------- drivers/net/ethernet/hisilicon/hns3/Makefile | 2 ++ .../net/ethernet/hisilicon/hns3/hns3vf/Makefile | 9 +++++++ 3 files changed, 30 insertions(+), 9 deletions(-) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 30000b6aa7b8..8bcf470ff5f3 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -94,15 +94,6 @@ config HNS3_HCLGE compatibility layer. The engine would be used in Hisilicon hip08 family of SoCs and further upcoming SoCs. -config HNS3_ENET - tristate "Hisilicon HNS3 Ethernet Device Support" - depends on 64BIT && PCI - depends on HNS3 && HNS3_HCLGE - ---help--- - This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 - family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 - devices and their associated operations. - config HNS3_DCB bool "Hisilicon HNS3 Data Center Bridge Support" default n @@ -112,4 +103,23 @@ config HNS3_DCB If unsure, say N. 
+config HNS3_HCLGEVF + tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support" + depends on PCI_MSI + depends on HNS3 + depends on HNS3_HCLGE + ---help--- + This selects the HNS3 VF driver's network acceleration engine & its hardware + compatibility layer. The engine would be used in Hisilicon hip08 family of + SoCs and further upcoming SoCs. + +config HNS3_ENET + tristate "Hisilicon HNS3 Ethernet Device Support" + depends on 64BIT && PCI + depends on HNS3 + ---help--- + This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 + family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 + devices and their associated operations. + + endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index a9349e1f3e51..c45094513c87 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -1,7 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0+ # # Makefile for the HISILICON network device drivers. # obj-$(CONFIG_HNS3) += hns3pf/ +obj-$(CONFIG_HNS3) += hns3vf/ obj-$(CONFIG_HNS3) += hnae3.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile new file mode 100644 index 000000000000..fb93bbd35845 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Makefile for the HISILICON network device drivers. +# + +ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 + +obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o +hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file -- cgit From 424eb834a9be49273c4b32d0d6395dfdbe768a1a Mon Sep 17 00:00:00 2001 From: Salil Mehta Date: Thu, 14 Dec 2017 18:03:06 +0000 Subject: net: hns3: Unified HNS3 {VF|PF} Ethernet Driver for hip08 SoC Most of the NAPI handling interface, skb buffer management, management of the RX/TX descriptors, ethtool interface etc. have quite a bit of code which is common to the VF and PF drivers. This patch makes the existing PF's HNS3 ENET driver the common ENET driver for both the Virtual & Physical Functions. This will help reduce redundancy and improve code management. Signed-off-by: Salil Mehta Signed-off-by: lipeng Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/Makefile | 5 + drivers/net/ethernet/hisilicon/hns3/hnae3.c | 14 +- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 7 +- drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 100 + drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3216 ++++++++++++++++++++ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 613 ++++ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 896 ++++++ .../net/ethernet/hisilicon/hns3/hns3pf/Makefile | 5 - .../ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c | 100 - .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 3214 ------------------- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 613 ---- .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 876 ------ .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1 + 13 files changed, 4847 insertions(+), 4813 deletions(-) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c delete mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c delete mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c delete mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h delete mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index c45094513c87..002534f12b66 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -7,3 +7,8 @@ obj-$(CONFIG_HNS3) += hns3pf/ obj-$(CONFIG_HNS3) += hns3vf/ obj-$(CONFIG_HNS3) += hnae3.o + +obj-$(CONFIG_HNS3_ENET) += hns3.o +hns3-objs = hns3_enet.o hns3_ethtool.o + +hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 5bcb2238acb2..02145f2de820 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -196,9 +196,18 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - int ret = 0; + int ret = 0, lock_acquired; + + /* we can get deadlocked if SRIOV is being enabled in context to probe + * and probe gets called again in same context. This can happen when + * pci_enable_sriov() is called to create VFs from PF probes context. + * Therefore, for simplicity uniformly defering further probing in all + * cases where we detect contention. 
+ */ + lock_acquired = mutex_trylock(&hnae3_common_lock); + if (!lock_acquired) + return -EPROBE_DEFER; - mutex_lock(&hnae3_common_lock); list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); /* Check if there are matched ae_algo */ @@ -211,6 +220,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) if (!ae_dev->ops) { dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); + ret = -EOPNOTSUPP; goto out_err; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 67c59e1039f2..a9e2b32834c5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -452,9 +452,10 @@ struct hnae3_unic_private_info { struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ }; -#define HNAE3_SUPPORT_MAC_LOOPBACK 1 -#define HNAE3_SUPPORT_PHY_LOOPBACK 2 -#define HNAE3_SUPPORT_SERDES_LOOPBACK 4 +#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0) +#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1) +#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2) +#define HNAE3_SUPPORT_VF BIT(3) struct hnae3_handle { struct hnae3_client *client; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c new file mode 100644 index 000000000000..eb82700da7d0 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "hnae3.h" +#include "hns3_enet.h" + +static +int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->ieee_getets) + return h->kinfo.dcb_ops->ieee_getets(h, ets); + + return -EOPNOTSUPP; +} + +static +int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->ieee_setets) + return h->kinfo.dcb_ops->ieee_setets(h, ets); + + return -EOPNOTSUPP; +} + +static +int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->ieee_getpfc) + return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); + + return -EOPNOTSUPP; +} + +static +int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->ieee_setpfc) + return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); + + return -EOPNOTSUPP; +} + +/* DCBX configuration */ +static u8 hns3_dcbnl_getdcbx(struct net_device *ndev) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->getdcbx) + return h->kinfo.dcb_ops->getdcbx(h); + + return 0; +} + +/* return 0 if successful, otherwise fail */ +static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->setdcbx) + return h->kinfo.dcb_ops->setdcbx(h, mode); + + return 1; +} + +static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = { + .ieee_getets = hns3_dcbnl_ieee_getets, + .ieee_setets = hns3_dcbnl_ieee_setets, + .ieee_getpfc = hns3_dcbnl_ieee_getpfc, + .ieee_setpfc = hns3_dcbnl_ieee_setpfc, + .getdcbx = hns3_dcbnl_getdcbx, + .setdcbx = hns3_dcbnl_setdcbx, +}; + +/* hclge_dcbnl_setup - 
DCBNL setup + * @handle: the corresponding vport handle + * Set up DCBNL + */ +void hns3_dcbnl_setup(struct hnae3_handle *handle) +{ + struct net_device *dev = handle->kinfo.netdev; + + if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF)) + return; + + dev->dcbnl_ops = &hns3_dcbnl_ops; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c new file mode 100644 index 000000000000..c2c13238db52 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -0,0 +1,3216 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hns3_enet.h" + +static const char hns3_driver_name[] = "hns3"; +const char hns3_driver_version[] = VERMAGIC_STRING; +static const char hns3_driver_string[] = + "Hisilicon Ethernet Network Driver for Hip08 Family"; +static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; +static struct hnae3_client client; + +/* hns3_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id hns3_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); + +static irqreturn_t hns3_irq_handle(int irq, void *dev) +{ + struct hns3_enet_tqp_vector *tqp_vector = dev; + + napi_schedule(&tqp_vector->napi); + + return IRQ_HANDLED; +} + +static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + unsigned int i; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) + continue; + + /* release the irq resource */ + free_irq(tqp_vectors->vector_irq, tqp_vectors); + tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; + } +} + +static int hns3_nic_init_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + int txrx_int_idx = 0; + int rx_int_idx = 0; + int tx_int_idx = 0; + unsigned int i; + int ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) + continue; + + if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "TxRx", + txrx_int_idx++); + txrx_int_idx++; + } else if (tqp_vectors->rx_group.ring) { + 
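/*
 * Rx-only vector: the name written below is "<netdev>-Rx-<n>", e.g.
 * "eth0-Rx-0", so each vector is easy to identify in /proc/interrupts.
 */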
snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "Rx", + rx_int_idx++); + } else if (tqp_vectors->tx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "Tx", + tx_int_idx++); + } else { + /* Skip this unused q_vector */ + continue; + } + + tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; + + ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, + tqp_vectors->name, + tqp_vectors); + if (ret) { + netdev_err(priv->netdev, "request irq(%d) fail\n", + tqp_vectors->vector_irq); + return ret; + } + + tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; + } + + return 0; +} + +static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, + u32 mask_en) +{ + writel(mask_en, tqp_vector->mask_addr); +} + +static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) +{ + napi_enable(&tqp_vector->napi); + + /* enable vector */ + hns3_mask_vector_irq(tqp_vector, 1); +} + +static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* disable vector */ + hns3_mask_vector_irq(tqp_vector, 0); + + disable_irq(tqp_vector->vector_irq); + napi_disable(&tqp_vector->napi); +} + +static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value) +{ + /* this defines the configuration for GL (Interrupt Gap Limiter). + * GL defines the inter-interrupt gap. + * GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing + */ + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET); +} + +static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector, + u32 rl_value) +{ + /* this defines the configuration for RL (Interrupt Rate Limiter). + * RL defines the rate of interrupts, i.e. the number of interrupts-per-second. + * GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing + */ + writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); +} + +static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* initialize the configuration for interrupt coalescing. + * 1. GL (Interrupt Gap Limiter) + * 2.
RL (Interrupt Rate Limiter) + */ + + /* Default :enable interrupt coalesce */ + tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; + tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K; + hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K); + /* for now we are disabling Interrupt RL - we + * will re-enable later + */ + hns3_set_vector_coalesc_rl(tqp_vector, 0); + tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW; + tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; +} + +static int hns3_nic_set_real_num_queue(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo = &h->kinfo; + unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; + int ret; + + ret = netif_set_real_num_tx_queues(netdev, queue_size); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_tx_queues fail, ret=%d!\n", + ret); + return ret; + } + + ret = netif_set_real_num_rx_queues(netdev, queue_size); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); + return ret; + } + + return 0; +} + +static int hns3_nic_net_up(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int i, j; + int ret; + + /* get irq resource for all vectors */ + ret = hns3_nic_init_irq(priv); + if (ret) { + netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); + return ret; + } + + /* enable the vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_enable(&priv->tqp_vector[i]); + + /* start the ae_dev */ + ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; + if (ret) + goto out_start_err; + + return 0; + +out_start_err: + for (j = i - 1; j >= 0; j--) + hns3_vector_disable(&priv->tqp_vector[j]); + + hns3_nic_uninit_irq(priv); + + return ret; +} + +static int hns3_nic_net_open(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + netif_carrier_off(netdev); + + ret = hns3_nic_set_real_num_queue(netdev); + if (ret) + return ret; + + ret = hns3_nic_net_up(netdev); + if (ret) { + netdev_err(netdev, + "hns net up fail, ret=%d!\n", ret); + return ret; + } + + priv->last_reset_time = jiffies; + return 0; +} + +static void hns3_nic_net_down(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + const struct hnae3_ae_ops *ops; + int i; + + /* stop ae_dev */ + ops = priv->ae_handle->ae_algo->ops; + if (ops->stop) + ops->stop(priv->ae_handle); + + /* disable vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + + /* free irq resources */ + hns3_nic_uninit_irq(priv); +} + +static int hns3_nic_net_stop(struct net_device *netdev) +{ + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + + hns3_nic_net_down(netdev); + + return 0; +} + +static int hns3_nic_uc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->add_uc_addr) + return h->ae_algo->ops->add_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_uc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->rm_uc_addr) + return h->ae_algo->ops->rm_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_mc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->add_mc_addr) + return h->ae_algo->ops->add_mc_addr(h, addr); + + 
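/*
 * These sync/unsync helpers (and their unicast twins above) are
 * callbacks for the kernel's address-list sync core;
 * hns3_nic_set_rx_mode() below wires them up roughly like this:
 *
 *	if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
 *		netdev_err(netdev, "sync mc address fail\n");
 *
 * __dev_mc_sync() walks the netdev's multicast list and invokes the sync
 * callback only for newly added addresses and the unsync callback only
 * for removed ones, so the driver sees just the delta.
 */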
return 0; +} + +static int hns3_nic_mc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->rm_mc_addr) + return h->ae_algo->ops->rm_mc_addr(h, addr); + + return 0; +} + +static void hns3_nic_set_rx_mode(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->set_promisc_mode) { + if (netdev->flags & IFF_PROMISC) + h->ae_algo->ops->set_promisc_mode(h, 1); + else + h->ae_algo->ops->set_promisc_mode(h, 0); + } + if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) + netdev_err(netdev, "sync uc address fail\n"); + if (netdev->flags & IFF_MULTICAST) + if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) + netdev_err(netdev, "sync mc address fail\n"); +} + +static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, + u16 *mss, u32 *type_cs_vlan_tso) +{ + u32 l4_offset, hdr_len; + union l3_hdr_info l3; + union l4_hdr_info l4; + u32 l4_paylen; + int ret; + + if (!skb_is_gso(skb)) + return 0; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* Software should clear the IPv4's checksum field when tso is + * needed. + */ + if (l3.v4->version == 4) + l3.v4->check = 0; + + /* tunnel packet.*/ + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if ((!(skb_shinfo(skb)->gso_type & + SKB_GSO_PARTIAL)) && + (skb_shinfo(skb)->gso_type & + SKB_GSO_UDP_TUNNEL_CSUM)) { + /* Software should clear the udp's checksum + * field when tso is needed. + */ + l4.udp->check = 0; + } + /* reset l3&l4 pointers from outer to inner headers */ + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* Software should clear the IPv4's checksum field when + * tso is needed. 
+ */ + if (l3.v4->version == 4) + l3.v4->check = 0; + } + + /* normal or tunnel packet*/ + l4_offset = l4.hdr - skb->data; + hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner pseudo checksum when tso*/ + l4_paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(l4_paylen)); + + /* find the txbd field values */ + *paylen = skb->len - hdr_len; + hnae_set_bit(*type_cs_vlan_tso, + HNS3_TXD_TSO_B, 1); + + /* get MSS for TSO */ + *mss = skb_shinfo(skb)->gso_size; + + return 0; +} + +static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, + u8 *il4_proto) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + unsigned char *l4_hdr; + unsigned char *exthdr; + u8 l4_proto_tmp; + __be16 frag_off; + + /* find outer header point */ + l3.hdr = skb_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (skb->protocol == htons(ETH_P_IPV6)) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (skb->protocol == htons(ETH_P_IP)) { + l4_proto_tmp = l3.v4->protocol; + } else { + return -EINVAL; + } + + *ol4_proto = l4_proto_tmp; + + /* tunnel packet */ + if (!skb->encapsulation) { + *il4_proto = 0; + return 0; + } + + /* find inner header point */ + l3.hdr = skb_inner_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (l3.v6->version == 6) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (l3.v4->version == 4) { + l4_proto_tmp = l3.v4->protocol; + } + + *il4_proto = l4_proto_tmp; + + return 0; +} + +static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + union { + struct tcphdr *tcp; + struct udphdr *udp; + struct gre_base_hdr *gre; + unsigned char *hdr; + } l4; + unsigned char *l2_hdr; + u8 l4_proto = ol4_proto; + u32 ol2_len; + u32 ol3_len; + u32 ol4_len; + u32 l2_len; + u32 l3_len; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute L2 header size for normal packet, defined in 2 Bytes */ + l2_len = l3.hdr - skb->data; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* tunnel packet*/ + if (skb->encapsulation) { + /* compute OL2 header size, defined in 2 Bytes */ + ol2_len = l2_len; + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, ol2_len >> 1); + + /* compute OL3 header size, defined in 4 Bytes */ + ol3_len = l4.hdr - l3.hdr; + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, ol3_len >> 2); + + /* MAC in UDP, MAC in GRE (0x6558)*/ + if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { + /* switch MAC header ptr from outer to inner header.*/ + l2_hdr = skb_inner_mac_header(skb); + + /* compute OL4 header size, defined in 4 Bytes. */ + ol4_len = l2_hdr - l4.hdr; + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, ol4_len >> 2); + + /* switch IP header ptr from outer to inner header */ + l3.hdr = skb_inner_network_header(skb); + + /* compute inner l2 header size, defined in 2 Bytes. 
*/ + l2_len = l3.hdr - l2_hdr; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); + } else { + /* skb packet types not supported by hardware, + * txbd len field is not filled. + */ + return; + } + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + + l4_proto = il4_proto; + } + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, l3_len >> 2); + + /* compute inner(/normal) L4 header size, defined in 4 Bytes */ + switch (l4_proto) { + case IPPROTO_TCP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, l4.tcp->doff); + break; + case IPPROTO_SCTP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); + break; + case IPPROTO_UDP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); + break; + default: + /* skb packet types not supported by hardware, + * txbd len field is not filled. + */ + return; + } +} + +static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + u32 l4_proto = ol4_proto; + + l3.hdr = skb_network_header(skb); + + /* define OL3 type and tunnel type(OL4). */ + if (skb->encapsulation) { + /* define outer network header type. */ + if (skb->protocol == htons(ETH_P_IP)) { + if (skb_is_gso(skb)) + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); + else + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); + + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + } + + /* define tunnel type(OL4). */ + switch (l4_proto) { + case IPPROTO_UDP: + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); + break; + case IPPROTO_GRE: + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); + break; + default: + /* drop the skb tunnel packet if hardware doesn't support it, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * the driver calculates the l4 checksum when not doing TSO. + */ + skb_checksum_help(skb); + return 0; + } + + l3.hdr = skb_inner_network_header(skb); + l4_proto = il4_proto; + } + + if (l3.v4->version == 4) { + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + + /* the stack computes the IP header already, the only time we + * need the hardware to recompute it is in the case of TSO.
+ */ + if (skb_is_gso(skb)) + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + } else if (l3.v6->version == 6) { + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV6); + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + } + + switch (l4_proto) { + case IPPROTO_TCP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_TCP); + break; + case IPPROTO_UDP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_UDP); + break; + case IPPROTO_SCTP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); + break; + default: + /* drop the skb tunnel packet if hardware doesn't support it, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * the driver calculates the l4 checksum when not doing TSO. + */ + skb_checksum_help(skb); + return 0; + } + + return 0; +} + +static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) +{ + /* Config bd buffer end */ + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, + HNS3_TXD_BDTYPE_S, 0); + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); +} + +static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + u32 ol_type_vlan_len_msec = 0; + u16 bdtp_fe_sc_vld_ra_ri = 0; + u32 type_cs_vlan_tso = 0; + struct sk_buff *skb; + u32 paylen = 0; + u16 mss = 0; + __be16 protocol; + u8 ol4_proto; + u8 il4_proto; + int ret; + + /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + /* now, fill the descriptor */ + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16((u16)size); + hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); + desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + + if (type == DESC_TYPE_SKB) { + skb = (struct sk_buff *)priv; + paylen = skb->len; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_reset_mac_len(skb); + protocol = skb->protocol; + + /* vlan packet */ + if (protocol == htons(ETH_P_8021Q)) { + protocol = vlan_get_protocol(skb); + skb->protocol = protocol; + } + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (ret) + return ret; + hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + if (ret) + return ret; + + ret = hns3_set_tso(skb, &paylen, &mss, + &type_cs_vlan_tso); + if (ret) + return ret; + } + + /* Set txbd */ + desc->tx.ol_type_vlan_len_msec = + cpu_to_le32(ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = + cpu_to_le32(type_cs_vlan_tso); + desc->tx.paylen = cpu_to_le32(paylen); + desc->tx.mss = cpu_to_le16(mss); + } + + /* move ring pointer to next. */ + ring_ptr_move_fw(ring, next_to_use); + + return 0; +} + +static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type) +{ + unsigned int frag_buf_num; + unsigned int k; +
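/*
 * Worked example of the split computed below, taking HNS3_MAX_BD_SIZE
 * (the per-descriptor byte limit) as 65535 for illustration:
 *
 *	size = 100000
 *	frag_buf_num = (100000 + 65535 - 1) / 65535 = 2
 *	sizeoflast = 100000 % 65535 = 34465
 *
 * i.e. the buffer is emitted as one full 65535-byte BD plus a final
 * 34465-byte BD, with the frag_end flag propagated only to the last BD.
 */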
int sizeoflast; + int ret; + + frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + sizeoflast = size % HNS3_MAX_BD_SIZE; + sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; + + /* When the frag size is bigger than hardware, split this frag */ + for (k = 0; k < frag_buf_num; k++) { + ret = hns3_fill_desc(ring, priv, + (k == frag_buf_num - 1) ? + sizeoflast : HNS3_MAX_BD_SIZE, + dma + HNS3_MAX_BD_SIZE * k, + frag_end && (k == frag_buf_num - 1) ? 1 : 0, + (type == DESC_TYPE_SKB && !k) ? + DESC_TYPE_SKB : DESC_TYPE_PAGE); + if (ret) + return ret; + } + + return 0; +} + +static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, + struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = *out_skb; + struct skb_frag_struct *frag; + int bdnum_for_frag; + int frag_num; + int buf_num; + int size; + int i; + + size = skb_headlen(skb); + buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + + frag_num = skb_shinfo(skb)->nr_frags; + for (i = 0; i < frag_num; i++) { + frag = &skb_shinfo(skb)->frags[i]; + size = skb_frag_size(frag); + bdnum_for_frag = + (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) + return -ENOMEM; + + buf_num += bdnum_for_frag; + } + + if (buf_num > ring_space(ring)) + return -EBUSY; + + *bnum = buf_num; + return 0; +} + +static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, + struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = *out_skb; + int buf_num; + + /* No. of segments (plus a header) */ + buf_num = skb_shinfo(skb)->nr_frags + 1; + + if (buf_num > ring_space(ring)) + return -EBUSY; + + *bnum = buf_num; + + return 0; +} + +static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) +{ + struct device *dev = ring_to_dev(ring); + unsigned int i; + + for (i = 0; i < ring->desc_num; i++) { + /* check if this is where we started */ + if (ring->next_to_use == next_to_use_orig) + break; + + /* unmap the descriptor dma address */ + if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) + dma_unmap_single(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + + /* rollback one */ + ring_ptr_move_bw(ring, next_to_use); + } +} + +netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_nic_ring_data *ring_data = + &tx_ring_data(priv, skb->queue_mapping); + struct hns3_enet_ring *ring = ring_data->ring; + struct device *dev = priv->dev; + struct netdev_queue *dev_queue; + struct skb_frag_struct *frag; + int next_to_use_head; + int next_to_use_frag; + dma_addr_t dma; + int buf_num; + int seg_num; + int size; + int ret; + int i; + + /* Prefetch the data used later */ + prefetch(skb->data); + + switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { + case -EBUSY: + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_busy++; + u64_stats_update_end(&ring->syncp); + + goto out_net_tx_busy; + case -ENOMEM: + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + netdev_err(netdev, "no memory to xmit!\n"); + + goto out_err_tx_ok; + default: + break; + } + + /* No. 
of segments (plus a header) */ + seg_num = skb_shinfo(skb)->nr_frags + 1; + /* Fill the first part */ + size = skb_headlen(skb); + + next_to_use_head = ring->next_to_use; + + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) { + netdev_err(netdev, "TX head DMA map failed\n"); + ring->stats.sw_err_cnt++; + goto out_err_tx_ok; + } + + ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, + DESC_TYPE_SKB); + if (ret) + goto head_dma_map_err; + + next_to_use_frag = ring->next_to_use; + /* Fill the fragments */ + for (i = 1; i < seg_num; i++) { + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) { + netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); + ring->stats.sw_err_cnt++; + goto frag_dma_map_err; + } + ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, + seg_num - 1 == i ? 1 : 0, + DESC_TYPE_PAGE); + + if (ret) + goto frag_dma_map_err; + } + + /* Complete translate all packets */ + dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); + netdev_tx_sent_queue(dev_queue, skb->len); + + wmb(); /* Commit all data before submit */ + + hnae_queue_xmit(ring->tqp, buf_num); + + return NETDEV_TX_OK; + +frag_dma_map_err: + hns_nic_dma_unmap(ring, next_to_use_frag); + +head_dma_map_err: + hns_nic_dma_unmap(ring, next_to_use_head); + +out_err_tx_ok: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + +out_net_tx_busy: + netif_stop_subqueue(netdev, ring_data->queue_index); + smp_mb(); /* Commit all data before submit */ + + return NETDEV_TX_BUSY; +} + +static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct sockaddr *mac_addr = p; + int ret; + + if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) + return -EADDRNOTAVAIL; + + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data); + if (ret) { + netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); + return ret; + } + + ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); + + return 0; +} + +static int hns3_nic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { + priv->ops.fill_desc = hns3_fill_desc_tso; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; + } else { + priv->ops.fill_desc = hns3_fill_desc; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; + } + + netdev->features = features; + return 0; +} + +static void +hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct hns3_enet_ring *ring; + unsigned int start; + unsigned int idx; + u64 tx_bytes = 0; + u64 rx_bytes = 0; + u64 tx_pkts = 0; + u64 rx_pkts = 0; + + for (idx = 0; idx < queue_num; idx++) { + /* fetch the tx stats */ + ring = priv->ring_data[idx].ring; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + tx_bytes += ring->stats.tx_bytes; + tx_pkts += ring->stats.tx_pkts; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + /* fetch the rx stats */ + ring = priv->ring_data[idx + queue_num].ring; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + rx_bytes += ring->stats.rx_bytes; + rx_pkts += ring->stats.rx_pkts; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } + + 
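/*
 * The fetch_begin/fetch_retry pair re-reads a ring's counters if the hot
 * path updated them mid-read; on 64-bit builds it compiles down to plain
 * loads. The writer side pairs with it like this (the xmit path above
 * uses the same pattern for tx_busy and sw_err_cnt):
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.tx_bytes += skb->len;
 *	ring->stats.tx_pkts++;
 *	u64_stats_update_end(&ring->syncp);
 */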
stats->tx_bytes = tx_bytes; + stats->tx_packets = tx_pkts; + stats->rx_bytes = rx_bytes; + stats->rx_packets = rx_pkts; + + stats->rx_errors = netdev->stats.rx_errors; + stats->multicast = netdev->stats.multicast; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + + stats->tx_errors = netdev->stats.tx_errors; + stats->rx_dropped = netdev->stats.rx_dropped; + stats->tx_dropped = netdev->stats.tx_dropped; + stats->collisions = netdev->stats.collisions; + stats->rx_over_errors = netdev->stats.rx_over_errors; + stats->rx_frame_errors = netdev->stats.rx_frame_errors; + stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; + stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; + stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; + stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; + stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; + stats->tx_window_errors = netdev->stats.tx_window_errors; + stats->rx_compressed = netdev->stats.rx_compressed; + stats->tx_compressed = netdev->stats.tx_compressed; +} + +static void hns3_add_tunnel_port(struct net_device *netdev, u16 port, + enum hns3_udp_tnl_type type) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; + struct hnae3_handle *h = priv->ae_handle; + + if (udp_tnl->used && udp_tnl->dst_port == port) { + udp_tnl->used++; + return; + } + + if (udp_tnl->used) { + netdev_warn(netdev, + "UDP tunnel [%d], port [%d] offload\n", type, port); + return; + } + + udp_tnl->dst_port = port; + udp_tnl->used = 1; + /* TBD send command to hardware to add port */ + if (h->ae_algo->ops->add_tunnel_udp) + h->ae_algo->ops->add_tunnel_udp(h, port); +} + +static void hns3_del_tunnel_port(struct net_device *netdev, u16 port, + enum hns3_udp_tnl_type type) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; + struct hnae3_handle *h = priv->ae_handle; + + if (!udp_tnl->used || udp_tnl->dst_port != port) { + netdev_warn(netdev, + "Invalid UDP tunnel port %d\n", port); + return; + } + + udp_tnl->used--; + if (udp_tnl->used) + return; + + udp_tnl->dst_port = 0; + /* TBD send command to hardware to del port */ + if (h->ae_algo->ops->del_tunnel_udp) + h->ae_algo->ops->del_tunnel_udp(h, port); +} + +/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports + * @netdev: This physical port's netdev + * @ti: Tunnel information + */ +static void hns3_nic_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + u16 port_n = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); + break; + case UDP_TUNNEL_TYPE_GENEVE: + hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); + break; + default: + netdev_err(netdev, "unsupported tunnel type %d\n", ti->type); + break; + } +} + +static void hns3_nic_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + u16 port_n = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); + break; + case UDP_TUNNEL_TYPE_GENEVE: + hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); + break; + default: + break; + } +} + +static int hns3_setup_tc(struct net_device *netdev, void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; + struct hnae3_handle
*h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo = &h->kinfo; + u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; + u8 tc = mqprio_qopt->qopt.num_tc; + u16 mode = mqprio_qopt->mode; + u8 hw = mqprio_qopt->qopt.hw; + bool if_running; + unsigned int i; + int ret; + + if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && + mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) + return -EOPNOTSUPP; + + if (tc > HNAE3_MAX_TC) + return -EINVAL; + + if (!netdev) + return -EINVAL; + + if_running = netif_running(netdev); + if (if_running) { + hns3_nic_net_stop(netdev); + msleep(100); + } + + ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? + kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; + if (ret) + goto out; + + if (tc <= 1) { + netdev_reset_tc(netdev); + } else { + ret = netdev_set_num_tc(netdev, tc); + if (ret) + goto out; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!kinfo->tc_info[i].enable) + continue; + + netdev_set_tc_queue(netdev, + kinfo->tc_info[i].tc, + kinfo->tc_info[i].tqp_count, + kinfo->tc_info[i].tqp_offset); + } + } + + ret = hns3_nic_set_real_num_queue(netdev); + +out: + if (if_running) + hns3_nic_net_open(netdev); + + return ret; +} + +static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + if (type != TC_SETUP_QDISC_MQPRIO) + return -EOPNOTSUPP; + + return hns3_setup_tc(dev, type_data); +} + +static int hns3_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); + + return ret; +} + +static int hns3_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); + + return ret; +} + +static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = -EIO; + + if (h->ae_algo->ops->set_vf_vlan_filter) + ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, + qos, vlan_proto); + + return ret; +} + +static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + bool if_running = netif_running(netdev); + int ret; + + if (!h->ae_algo->ops->set_mtu) + return -EOPNOTSUPP; + + /* if this was called with netdev up then bring netdevice down */ + if (if_running) { + (void)hns3_nic_net_stop(netdev); + msleep(100); + } + + ret = h->ae_algo->ops->set_mtu(h, new_mtu); + if (ret) { + netdev_err(netdev, "failed to change MTU in hardware %d\n", + ret); + return ret; + } + + /* if the netdev was running earlier, bring it up again */ + if (if_running && hns3_nic_net_open(netdev)) + ret = -EINVAL; + + return ret; +} + +static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hns3_enet_ring *tx_ring = NULL; + int timeout_queue = 0; + int hw_head, hw_tail; + int i; + + /* Find the stopped queue the same way the stack does */ + for (i = 0; i < ndev->real_num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(ndev, i); + trans_start = q->trans_start; + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + ndev->watchdog_timeo))) { + timeout_queue = i; + break; + } + } + 
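/*
 * This search mirrors the stack's own watchdog: a queue counts as hung
 * when it is stopped and its last transmit is older than the watchdog
 * timeout, i.e. the predicate used above is
 *
 *	netif_xmit_stopped(q) &&
 *	time_after(jiffies, q->trans_start + ndev->watchdog_timeo)
 */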
+	if (i == ndev->real_num_tx_queues) {
+		netdev_info(ndev,
+			    "no netdev TX timeout queue found, timeout count: %llu\n",
+			    priv->tx_timeout_count);
+		return false;
+	}
+
+	tx_ring = priv->ring_data[timeout_queue].ring;
+
+	hw_head = readl_relaxed(tx_ring->tqp->io_base +
+				HNS3_RING_TX_RING_HEAD_REG);
+	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
+				HNS3_RING_TX_RING_TAIL_REG);
+	netdev_info(ndev,
+		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
+		    priv->tx_timeout_count,
+		    timeout_queue,
+		    tx_ring->next_to_use,
+		    tx_ring->next_to_clean,
+		    hw_head,
+		    hw_tail,
+		    readl(tx_ring->tqp_vector->mask_addr));
+
+	return true;
+}
+
+static void hns3_nic_net_timeout(struct net_device *ndev)
+{
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	unsigned long last_reset_time = priv->last_reset_time;
+	struct hnae3_handle *h = priv->ae_handle;
+
+	if (!hns3_get_tx_timeo_queue_info(ndev))
+		return;
+
+	priv->tx_timeout_count++;
+
+	/* This timeout is far enough away from the last one; if we time
+	 * out again, restart escalation from a PF (function) level reset.
+	 */
+	if (time_after(jiffies, (last_reset_time + 20 * HZ)))
+		priv->reset_level = HNAE3_FUNC_RESET;
+
+	/* Don't do any new action before the next timeout */
+	else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo)))
+		return;
+
+	priv->last_reset_time = jiffies;
+
+	if (h->ae_algo->ops->reset_event)
+		h->ae_algo->ops->reset_event(h, priv->reset_level);
+
+	priv->reset_level++;
+	if (priv->reset_level > HNAE3_GLOBAL_RESET)
+		priv->reset_level = HNAE3_GLOBAL_RESET;
+}
+
+static const struct net_device_ops hns3_nic_netdev_ops = {
+	.ndo_open		= hns3_nic_net_open,
+	.ndo_stop		= hns3_nic_net_stop,
+	.ndo_start_xmit		= hns3_nic_net_xmit,
+	.ndo_tx_timeout		= hns3_nic_net_timeout,
+	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
+	.ndo_change_mtu		= hns3_nic_change_mtu,
+	.ndo_set_features	= hns3_nic_set_features,
+	.ndo_get_stats64	= hns3_nic_get_stats64,
+	.ndo_setup_tc		= hns3_nic_setup_tc,
+	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
+	.ndo_udp_tunnel_add	= hns3_nic_udp_tunnel_add,
+	.ndo_udp_tunnel_del	= hns3_nic_udp_tunnel_del,
+	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
+	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
+};
+
+/* hns3_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in hns3_pci_tbl
+ *
+ * hns3_probe initializes a PF identified by a pci_dev structure.
+ * The OS initialization, configuring of the PF private structure,
+ * and a hardware reset occur.
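+ *
+ * Concretely, an hnae3_ae_dev is allocated for the PCI device and
+ * registered with the HNAE3 framework, which can then match it up
+ * with an algorithm engine and client.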
+ * + * Returns 0 on success, negative on failure + */ +static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct hnae3_ae_dev *ae_dev; + int ret; + + ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), + GFP_KERNEL); + if (!ae_dev) { + ret = -ENOMEM; + return ret; + } + + ae_dev->pdev = pdev; + ae_dev->flag = ent->driver_data; + ae_dev->dev_type = HNAE3_DEV_KNIC; + pci_set_drvdata(pdev, ae_dev); + + return hnae3_register_ae_dev(ae_dev); +} + +/* hns3_remove - Device removal routine + * @pdev: PCI device information struct + */ +static void hns3_remove(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + + devm_kfree(&pdev->dev, ae_dev); + + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver hns3_driver = { + .name = hns3_driver_name, + .id_table = hns3_pci_tbl, + .probe = hns3_probe, + .remove = hns3_remove, +}; + +/* set default feature to hns3 */ +static void hns3_set_default_feature(struct net_device *netdev) +{ + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->vlan_features |= + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | + NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; +} + +static int hns3_alloc_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + unsigned int order = hnae_page_order(ring); + struct page *p; + + p = dev_alloc_pages(order); + if (!p) + return -ENOMEM; + + cb->priv = p; + cb->page_offset = 0; + cb->reuse_flag = 0; + cb->buf = page_address(p); + cb->length = hnae_page_size(ring); + cb->type = DESC_TYPE_PAGE; + + return 0; +} + +static void hns3_free_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type == DESC_TYPE_SKB) + dev_kfree_skb_any((struct sk_buff *)cb->priv); + else if (!HNAE3_IS_TX_RING(ring)) + put_page((struct page *)cb->priv); + memset(cb, 0, sizeof(*cb)); +} + +static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) +{ + cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, + cb->length, ring_to_dma_dir(ring)); + + if (dma_mapping_error(ring_to_dev(ring), cb->dma)) + return -EIO; + + return 0; +} + +static void hns3_unmap_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type == DESC_TYPE_SKB) + dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); + else + dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); +} + +static void 
hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
+{
+	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
+	ring->desc[i].addr = 0;
+}
+
+static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
+{
+	struct hns3_desc_cb *cb = &ring->desc_cb[i];
+
+	if (!ring->desc_cb[i].dma)
+		return;
+
+	hns3_buffer_detach(ring, i);
+	hns3_free_buffer(ring, cb);
+}
+
+static void hns3_free_buffers(struct hns3_enet_ring *ring)
+{
+	int i;
+
+	for (i = 0; i < ring->desc_num; i++)
+		hns3_free_buffer_detach(ring, i);
+}
+
+/* free desc along with its attached buffer */
+static void hns3_free_desc(struct hns3_enet_ring *ring)
+{
+	hns3_free_buffers(ring);
+
+	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
+			 ring->desc_num * sizeof(ring->desc[0]),
+			 DMA_BIDIRECTIONAL);
+	ring->desc_dma_addr = 0;
+	kfree(ring->desc);
+	ring->desc = NULL;
+}
+
+static int hns3_alloc_desc(struct hns3_enet_ring *ring)
+{
+	int size = ring->desc_num * sizeof(ring->desc[0]);
+
+	ring->desc = kzalloc(size, GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
+					     size, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
+		ring->desc_dma_addr = 0;
+		kfree(ring->desc);
+		ring->desc = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
+				   struct hns3_desc_cb *cb)
+{
+	int ret;
+
+	ret = hns3_alloc_buffer(ring, cb);
+	if (ret)
+		goto out;
+
+	ret = hns3_map_buffer(ring, cb);
+	if (ret)
+		goto out_with_buf;
+
+	return 0;
+
+out_with_buf:
+	hns3_free_buffer(ring, cb);
+out:
+	return ret;
+}
+
+static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
+{
+	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
+
+	if (ret)
+		return ret;
+
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+
+	return 0;
+}
+
+/* Allocate memory for raw packets and map it for DMA */
+static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
+{
+	int i, j, ret;
+
+	for (i = 0; i < ring->desc_num; i++) {
+		ret = hns3_alloc_buffer_attach(ring, i);
+		if (ret)
+			goto out_buffer_fail;
+	}
+
+	return 0;
+
+out_buffer_fail:
+	for (j = i - 1; j >= 0; j--)
+		hns3_free_buffer_detach(ring, j);
+	return ret;
+}
+
+/* detach an in-use buffer and replace it with a reserved one */
+static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
+				struct hns3_desc_cb *res_cb)
+{
+	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
+	ring->desc_cb[i] = *res_cb;
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+}
+
+static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
+{
+	ring->desc_cb[i].reuse_flag = 0;
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
+		+ ring->desc_cb[i].page_offset);
+}
+
+static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
+				      int *pkts)
+{
+	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+	(*bytes) += desc_cb->length;
+	/* desc_cb will be cleaned after hns3_free_buffer_detach() */
+	hns3_free_buffer_detach(ring, ring->next_to_clean);
+
+	ring_ptr_move_fw(ring, next_to_clean);
+}
+
+static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
+{
+	int u = ring->next_to_use;
+	int c = ring->next_to_clean;
+
+	if (unlikely(h > ring->desc_num))
+		return 0;
+
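+	/* Two cases, depending on whether the ring has wrapped: e.g. with
+	 * desc_num = 64, c = 60 and u = 4, only a head of 61..63 or 0..4
+	 * is consistent with what the driver has posted.
+	 */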
+	return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
+
+bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
+{
+	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	struct netdev_queue *dev_queue;
+	int bytes, pkts;
+	int head;
+
+	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
+	rmb(); /* Make sure head is ready before touching any data */
+
+	if (is_ring_empty(ring) || head == ring->next_to_clean)
+		return true; /* no data to poll */
+
+	if (!is_valid_clean_head(ring, head)) {
+		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
+			   ring->next_to_use, ring->next_to_clean);
+
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.io_err_cnt++;
+		u64_stats_update_end(&ring->syncp);
+		return true;
+	}
+
+	bytes = 0;
+	pkts = 0;
+	while (head != ring->next_to_clean && budget) {
+		hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
+		/* Issue prefetch for next Tx descriptor */
+		prefetch(&ring->desc_cb[ring->next_to_clean]);
+		budget--;
+	}
+
+	ring->tqp_vector->tx_group.total_bytes += bytes;
+	ring->tqp_vector->tx_group.total_packets += pkts;
+
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.tx_bytes += bytes;
+	ring->stats.tx_pkts += pkts;
+	u64_stats_update_end(&ring->syncp);
+
+	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
+	netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
+	if (unlikely(pkts && netif_carrier_ok(netdev) &&
+		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (netif_tx_queue_stopped(dev_queue)) {
+			netif_tx_wake_queue(dev_queue);
+			ring->stats.restart_queue++;
+		}
+	}
+
+	return !!budget;
+}
+
+static int hns3_desc_unused(struct hns3_enet_ring *ring)
+{
+	int ntc = ring->next_to_clean;
+	int ntu = ring->next_to_use;
+
+	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
+
+static void
+hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
+{
+	struct hns3_desc_cb *desc_cb;
+	struct hns3_desc_cb res_cbs;
+	int i, ret;
+
+	for (i = 0; i < cleaned_count; i++) {
+		desc_cb = &ring->desc_cb[ring->next_to_use];
+		if (desc_cb->reuse_flag) {
+			u64_stats_update_begin(&ring->syncp);
+			ring->stats.reuse_pg_cnt++;
+			u64_stats_update_end(&ring->syncp);
+
+			hns3_reuse_buffer(ring, ring->next_to_use);
+		} else {
+			ret = hns3_reserve_buffer_map(ring, &res_cbs);
+			if (ret) {
+				u64_stats_update_begin(&ring->syncp);
+				ring->stats.sw_err_cnt++;
+				u64_stats_update_end(&ring->syncp);
+
+				netdev_err(ring->tqp->handle->kinfo.netdev,
+					   "hnae reserve buffer map failed.\n");
+				break;
+			}
+			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
+		}
+
+		ring_ptr_move_fw(ring, next_to_use);
+	}
+
+	wmb(); /* Make sure all data has been written before the doorbell */
+	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+}
+
+/* hns3_nic_get_headlen - determine size of header for LRO/GRO
+ * @data: pointer to the start of the headers
+ * @flag: the l234_info field of the Rx descriptor, holding the parse results
+ * @max_size: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads. The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
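+ *
+ * For example, an untagged IPv4 TCP frame without options walks
+ * ETH_HLEN (14) + IHL * 4 (20) + doff * 4 (20) and reports 54 bytes.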
+ */ +static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag, + unsigned int max_size) +{ + unsigned char *network; + u8 hlen; + + /* This should never happen, but better safe than sorry */ + if (max_size < ETH_HLEN) + return max_size; + + /* Initialize network frame pointer */ + network = data; + + /* Set first protocol and move network header forward */ + network += ETH_HLEN; + + /* Handle any vlan tag if present */ + if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S) + == HNS3_RX_FLAG_VLAN_PRESENT) { + if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) + return max_size; + + network += VLAN_HLEN; + } + + /* Handle L3 protocols */ + if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) + == HNS3_RX_FLAG_L3ID_IPV4) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct iphdr))) + return max_size; + + /* Access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (network[0] & 0x0F) << 2; + + /* Verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return network - data; + + /* Record next protocol if header is present */ + } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) + == HNS3_RX_FLAG_L3ID_IPV6) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct ipv6hdr))) + return max_size; + + /* Record next protocol */ + hlen = sizeof(struct ipv6hdr); + } else { + return network - data; + } + + /* Relocate pointer to start of L4 header */ + network += hlen; + + /* Finally sort out TCP/UDP */ + if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) + == HNS3_RX_FLAG_L4ID_TCP) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct tcphdr))) + return max_size; + + /* Access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (network[12] & 0xF0) >> 2; + + /* Verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return network - data; + + network += hlen; + } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) + == HNS3_RX_FLAG_L4ID_UDP) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct udphdr))) + return max_size; + + network += sizeof(struct udphdr); + } + + /* If everything has gone correctly network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. 
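+ *
+ * The only caller, hns3_handle_rx_bd(), passes HNS3_RX_HEAD_SIZE (256)
+ * as max_size, so at most 256 bytes are ever pulled into the skb head.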
+ */
+	if ((typeof(max_size))(network - data) < max_size)
+		return network - data;
+	else
+		return max_size;
+}
+
+static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
+				struct hns3_enet_ring *ring, int pull_len,
+				struct hns3_desc_cb *desc_cb)
+{
+	struct hns3_desc *desc;
+	int truesize, size;
+	int last_offset;
+	bool twobufs;
+
+	twobufs = ((PAGE_SIZE < 8192) &&
+		hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+
+	desc = &ring->desc[ring->next_to_clean];
+	size = le16_to_cpu(desc->rx.size);
+
+	if (twobufs) {
+		truesize = hnae_buf_size(ring);
+	} else {
+		truesize = ALIGN(size, L1_CACHE_BYTES);
+		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+	}
+
+	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
+			size - pull_len, truesize - pull_len);
+
+	/* Avoid reusing pages from a remote NUMA node; reuse_flag stays clear */
+	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
+		return;
+
+	if (twobufs) {
+		/* If we are the only owner of the page we can reuse it */
+		if (likely(page_count(desc_cb->priv) == 1)) {
+			/* Flip the page offset to the other buffer */
+			desc_cb->page_offset ^= truesize;
+
+			desc_cb->reuse_flag = 1;
+			/* bump the ref count on the page before it is given */
+			get_page(desc_cb->priv);
+		}
+		return;
+	}
+
+	/* Move offset up to the next cache line */
+	desc_cb->page_offset += truesize;
+
+	if (desc_cb->page_offset <= last_offset) {
+		desc_cb->reuse_flag = 1;
+		/* Bump the ref count on the page before it is given */
+		get_page(desc_cb->priv);
+	}
+}
+
+static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
+			     struct hns3_desc *desc)
+{
+	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	int l3_type, l4_type;
+	u32 bd_base_info;
+	int ol4_type;
+	u32 l234info;
+
+	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+	l234info = le32_to_cpu(desc->rx.l234_info);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	skb_checksum_none_assert(skb);
+
+	if (!(netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* check if hardware has done checksum */
+	if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+		return;
+
+	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
+		     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
+		     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
+		     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+		netdev_err(netdev, "L3/L4 error pkt\n");
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.l3l4_csum_err++;
+		u64_stats_update_end(&ring->syncp);
+
+		return;
+	}
+
+	l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
+				 HNS3_RXD_L3ID_S);
+	l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
+				 HNS3_RXD_L4ID_S);
+
+	ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
+	switch (ol4_type) {
+	case HNS3_OL4_TYPE_MAC_IN_UDP:
+	case HNS3_OL4_TYPE_NVGRE:
+		skb->csum_level = 1;
+		/* fall through */
+	case HNS3_OL4_TYPE_NO_TUN:
+		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
+		if (l3_type == HNS3_L3_TYPE_IPV4 ||
+		    (l3_type == HNS3_L3_TYPE_IPV6 &&
+		     (l4_type == HNS3_L4_TYPE_UDP ||
+		      l4_type == HNS3_L4_TYPE_TCP ||
+		      l4_type == HNS3_L4_TYPE_SCTP)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		break;
+	}
+}
+
+static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
+{
+	napi_gro_receive(&ring->tqp_vector->napi, skb);
+}
+
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+			     struct sk_buff **out_skb, int *out_bnum)
+{
+	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	struct hns3_desc_cb *desc_cb;
+	struct hns3_desc *desc;
+	struct sk_buff *skb;
+	unsigned char *va;
+	u32 bd_base_info;
+	int pull_len;
+	u32 l234info;
+	int length;
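+	/* bnum counts the buffer descriptors one packet spans; a chain of
+	 * BDs ends at the one with the FE (frame end) bit set.
+	 */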
+	int bnum;
+
+	desc = &ring->desc[ring->next_to_clean];
+	desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+	prefetch(desc);
+
+	length = le16_to_cpu(desc->rx.pkt_len);
+	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+	l234info = le32_to_cpu(desc->rx.l234_info);
+
+	/* Check valid BD */
+	if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+		return -EFAULT;
+
+	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+
+	/* Prefetch the first cache line of the first page.
+	 * The idea is to cache a few bytes of the packet header. Our L1
+	 * cache line size is 64B, so we need to prefetch twice to cover
+	 * 128B. On CPUs with larger 128B L1 cache lines a single fetch
+	 * already caches the relevant part of the header.
+	 */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
+					HNS3_RX_HEAD_SIZE);
+	if (unlikely(!skb)) {
+		netdev_err(netdev, "alloc rx skb fail\n");
+
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.sw_err_cnt++;
+		u64_stats_update_end(&ring->syncp);
+
+		return -ENOMEM;
+	}
+
+	prefetchw(skb->data);
+
+	bnum = 1;
+	if (length <= HNS3_RX_HEAD_SIZE) {
+		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+		/* We can reuse the buffer as-is, just make sure it is local */
+		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+			desc_cb->reuse_flag = 1;
+		else /* This page cannot be reused so discard it */
+			put_page(desc_cb->priv);
+
+		ring_ptr_move_fw(ring, next_to_clean);
+	} else {
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.seg_pkt_cnt++;
+		u64_stats_update_end(&ring->syncp);
+
+		pull_len = hns3_nic_get_headlen(va, l234info,
+						HNS3_RX_HEAD_SIZE);
+		memcpy(__skb_put(skb, pull_len), va,
+		       ALIGN(pull_len, sizeof(long)));
+
+		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
+		ring_ptr_move_fw(ring, next_to_clean);
+
+		while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+			desc = &ring->desc[ring->next_to_clean];
+			desc_cb = &ring->desc_cb[ring->next_to_clean];
+			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+			hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
+			ring_ptr_move_fw(ring, next_to_clean);
+			bnum++;
+		}
+	}
+
+	*out_bnum = bnum;
+
+	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
+		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
+			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.non_vld_descs++;
+		u64_stats_update_end(&ring->syncp);
+
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+
+	if (unlikely((!desc->rx.pkt_len) ||
+		     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
+		netdev_err(netdev, "truncated pkt\n");
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.err_pkt_len++;
+		u64_stats_update_end(&ring->syncp);
+
+		dev_kfree_skb_any(skb);
+		return -EFAULT;
+	}
+
+	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
+		netdev_err(netdev, "L2 error pkt\n");
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.l2_err++;
+		u64_stats_update_end(&ring->syncp);
+
+		dev_kfree_skb_any(skb);
+		return -EFAULT;
+	}
+
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.rx_pkts++;
+	ring->stats.rx_bytes += skb->len;
+	u64_stats_update_end(&ring->syncp);
+
+	ring->tqp_vector->rx_group.total_bytes += skb->len;
+
+	hns3_rx_checksum(ring, skb, desc);
+	return 0;
+}
+
+int hns3_clean_rx_ring(
+		struct hns3_enet_ring *ring, int budget,
+		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
+{
+#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
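+/* Rx buffers are refilled in batches: descriptors are reclaimed as packets
+ * are polled, and once at least 16 slots are free they are refilled and the
+ * ring head register is updated in one go, keeping doorbell writes rare.
+ */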
+	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	int recv_pkts, recv_bds, clean_count, err;
+	int unused_count = hns3_desc_unused(ring);
+	struct sk_buff *skb = NULL;
+	int num, bnum = 0;
+
+	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
+	rmb(); /* Make sure num is read before any other ring data is touched */
+
+	recv_pkts = 0, recv_bds = 0, clean_count = 0;
+	num -= unused_count;
+
+	while (recv_pkts < budget && recv_bds < num) {
+		/* Reuse or realloc buffers */
+		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+			hns3_nic_alloc_rx_buffers(ring,
+						  clean_count + unused_count);
+			clean_count = 0;
+			unused_count = hns3_desc_unused(ring);
+		}
+
+		/* Poll one pkt */
+		err = hns3_handle_rx_bd(ring, &skb, &bnum);
+		if (unlikely(!skb)) /* This fault cannot be repaired */
+			goto out;
+
+		recv_bds += bnum;
+		clean_count += bnum;
+		if (unlikely(err)) { /* Skip over the faulty packet */
+			recv_pkts++;
+			continue;
+		}
+
+		/* Hand the packet up to the IP stack */
+		skb->protocol = eth_type_trans(skb, netdev);
+		rx_fn(ring, skb);
+
+		recv_pkts++;
+	}
+
+out:
+	/* Make sure all data has been written before submit */
+	if (clean_count + unused_count > 0)
+		hns3_nic_alloc_rx_buffers(ring,
+					  clean_count + unused_count);
+
+	return recv_pkts;
+}
+
+static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
+{
+#define HNS3_RX_ULTRA_PACKET_RATE 40000
+	enum hns3_flow_level_range new_flow_level;
+	struct hns3_enet_tqp_vector *tqp_vector;
+	int packets_per_secs;
+	int bytes_per_usecs;
+	u16 new_int_gl;
+	int usecs;
+
+	if (!ring_group->int_gl)
+		return false;
+
+	if (ring_group->total_packets == 0) {
+		ring_group->int_gl = HNS3_INT_GL_50K;
+		ring_group->flow_level = HNS3_FLOW_LOW;
+		return true;
+	}
+
+	/* Simple throttle rate management based on observed throughput,
+	 * where one byte per microsecond equals one MB/s:
+	 * 0-10MB/s   lower  (50000 ints/s)
+	 * 10-20MB/s  middle (20000 ints/s)
+	 * 20-1249MB/s high  (18000 ints/s)
+	 * > 40000pps ultra  (8000 ints/s)
+	 */
+	new_flow_level = ring_group->flow_level;
+	new_int_gl = ring_group->int_gl;
+	tqp_vector = ring_group->ring->tqp_vector;
+	usecs = (ring_group->int_gl << 1);
+	bytes_per_usecs = ring_group->total_bytes / usecs;
+	/* scale the per-interval packet count up to packets per second */
+	packets_per_secs = ring_group->total_packets * 1000000 / usecs;
+
+	switch (new_flow_level) {
+	case HNS3_FLOW_LOW:
+		if (bytes_per_usecs > 10)
+			new_flow_level = HNS3_FLOW_MID;
+		break;
+	case HNS3_FLOW_MID:
+		if (bytes_per_usecs > 20)
+			new_flow_level = HNS3_FLOW_HIGH;
+		else if (bytes_per_usecs <= 10)
+			new_flow_level = HNS3_FLOW_LOW;
+		break;
+	case HNS3_FLOW_HIGH:
+	case HNS3_FLOW_ULTRA:
+	default:
+		if (bytes_per_usecs <= 20)
+			new_flow_level = HNS3_FLOW_MID;
+		break;
+	}
+
+	if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
+	    (&tqp_vector->rx_group == ring_group))
+		new_flow_level = HNS3_FLOW_ULTRA;
+
+	switch (new_flow_level) {
+	case HNS3_FLOW_LOW:
+		new_int_gl = HNS3_INT_GL_50K;
+		break;
+	case HNS3_FLOW_MID:
+		new_int_gl = HNS3_INT_GL_20K;
+		break;
+	case HNS3_FLOW_HIGH:
+		new_int_gl = HNS3_INT_GL_18K;
+		break;
+	case HNS3_FLOW_ULTRA:
+		new_int_gl = HNS3_INT_GL_8K;
+		break;
+	default:
+		break;
+	}
+
+	ring_group->total_bytes = 0;
+	ring_group->total_packets = 0;
+	ring_group->flow_level = new_flow_level;
+	if (new_int_gl != ring_group->int_gl) {
+		ring_group->int_gl = new_int_gl;
+		return true;
+	}
+	return false;
+}
+
+static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
+{
+	u16 rx_int_gl, tx_int_gl;
+	bool rx, tx;
+
+	rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
+	tx =
hns3_get_new_int_gl(&tqp_vector->tx_group); + rx_int_gl = tqp_vector->rx_group.int_gl; + tx_int_gl = tqp_vector->tx_group.int_gl; + if (rx && tx) { + if (rx_int_gl > tx_int_gl) { + tqp_vector->tx_group.int_gl = rx_int_gl; + tqp_vector->tx_group.flow_level = + tqp_vector->rx_group.flow_level; + hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl); + } else { + tqp_vector->rx_group.int_gl = tx_int_gl; + tqp_vector->rx_group.flow_level = + tqp_vector->tx_group.flow_level; + hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl); + } + } +} + +static int hns3_nic_common_poll(struct napi_struct *napi, int budget) +{ + struct hns3_enet_ring *ring; + int rx_pkt_total = 0; + + struct hns3_enet_tqp_vector *tqp_vector = + container_of(napi, struct hns3_enet_tqp_vector, napi); + bool clean_complete = true; + int rx_budget; + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + hns3_for_each_ring(ring, tqp_vector->tx_group) { + if (!hns3_clean_tx_ring(ring, budget)) + clean_complete = false; + } + + /* make sure rx ring budget not smaller than 1 */ + rx_budget = max(budget / tqp_vector->num_tqps, 1); + + hns3_for_each_ring(ring, tqp_vector->rx_group) { + int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, + hns3_rx_skb); + + if (rx_cleaned >= rx_budget) + clean_complete = false; + + rx_pkt_total += rx_cleaned; + } + + tqp_vector->rx_group.total_packets += rx_pkt_total; + + if (!clean_complete) + return budget; + + napi_complete(napi); + hns3_update_new_int_gl(tqp_vector); + hns3_mask_vector_irq(tqp_vector, 1); + + return rx_pkt_total; +} + +static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *cur_chain = head; + struct hnae3_ring_chain_node *chain; + struct hns3_enet_ring *tx_ring; + struct hns3_enet_ring *rx_ring; + + tx_ring = tqp_vector->tx_group.ring; + if (tx_ring) { + cur_chain->tqp_index = tx_ring->tqp->tqp_index; + hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + + cur_chain->next = NULL; + + while (tx_ring->next) { + tx_ring = tx_ring->next; + + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), + GFP_KERNEL); + if (!chain) + return -ENOMEM; + + cur_chain->next = chain; + chain->tqp_index = tx_ring->tqp->tqp_index; + hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + + cur_chain = chain; + } + } + + rx_ring = tqp_vector->rx_group.ring; + if (!tx_ring && rx_ring) { + cur_chain->next = NULL; + cur_chain->tqp_index = rx_ring->tqp->tqp_index; + hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + + rx_ring = rx_ring->next; + } + + while (rx_ring) { + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + + cur_chain->next = chain; + chain->tqp_index = rx_ring->tqp->tqp_index; + hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + cur_chain = chain; + + rx_ring = rx_ring->next; + } + + return 0; +} + +static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *chain_tmp, *chain; + + chain = head->next; + + while (chain) { + chain_tmp = chain->next; + devm_kfree(&pdev->dev, chain); + chain = chain_tmp; + } +} + +static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, + struct 
hns3_enet_ring *ring) +{ + ring->next = group->ring; + group->ring = ring; + + group->count++; +} + +static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_vector_info *vector; + struct pci_dev *pdev = h->pdev; + u16 tqp_num = h->kinfo.num_tqps; + u16 vector_num; + int ret = 0; + u16 i; + + /* RSS size, cpu online and vector_num should be the same */ + /* Should consider 2p/4p later */ + vector_num = min_t(u16, num_online_cpus(), tqp_num); + vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), + GFP_KERNEL); + if (!vector) + return -ENOMEM; + + vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); + + priv->vector_num = vector_num; + priv->tqp_vector = (struct hns3_enet_tqp_vector *) + devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), + GFP_KERNEL); + if (!priv->tqp_vector) + return -ENOMEM; + + for (i = 0; i < tqp_num; i++) { + u16 vector_i = i % vector_num; + + tqp_vector = &priv->tqp_vector[vector_i]; + + hns3_add_ring_to_group(&tqp_vector->tx_group, + priv->ring_data[i].ring); + + hns3_add_ring_to_group(&tqp_vector->rx_group, + priv->ring_data[i + tqp_num].ring); + + tqp_vector->idx = vector_i; + tqp_vector->mask_addr = vector[vector_i].io_addr; + tqp_vector->vector_irq = vector[vector_i].vector; + tqp_vector->num_tqps++; + + priv->ring_data[i].ring->tqp_vector = tqp_vector; + priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; + } + + for (i = 0; i < vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + tqp_vector->rx_group.total_bytes = 0; + tqp_vector->rx_group.total_packets = 0; + tqp_vector->tx_group.total_bytes = 0; + tqp_vector->tx_group.total_packets = 0; + hns3_vector_gl_rl_init(tqp_vector); + tqp_vector->handle = h; + + ret = hns3_get_vector_ring_chain(tqp_vector, + &vector_ring_chain); + if (ret) + goto out; + + ret = h->ae_algo->ops->map_ring_to_vector(h, + tqp_vector->vector_irq, &vector_ring_chain); + if (ret) + goto out; + + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + + netif_napi_add(priv->netdev, &tqp_vector->napi, + hns3_nic_common_poll, NAPI_POLL_WEIGHT); + } + +out: + devm_kfree(&pdev->dev, vector); + return ret; +} + +static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct pci_dev *pdev = h->pdev; + int i, ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + ret = hns3_get_vector_ring_chain(tqp_vector, + &vector_ring_chain); + if (ret) + return ret; + + ret = h->ae_algo->ops->unmap_ring_from_vector(h, + tqp_vector->vector_irq, &vector_ring_chain); + if (ret) + return ret; + + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + + if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { + (void)irq_set_affinity_hint( + priv->tqp_vector[i].vector_irq, + NULL); + free_irq(priv->tqp_vector[i].vector_irq, + &priv->tqp_vector[i]); + } + + priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; + + netif_napi_del(&priv->tqp_vector[i].napi); + } + + devm_kfree(&pdev->dev, priv->tqp_vector); + + return 0; +} + +static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, + int ring_type) +{ + struct hns3_nic_ring_data *ring_data = priv->ring_data; + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct 
pci_dev *pdev = priv->ae_handle->pdev; + struct hns3_enet_ring *ring; + + ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); + if (!ring) + return -ENOMEM; + + if (ring_type == HNAE3_RING_TYPE_TX) { + ring_data[q->tqp_index].ring = ring; + ring_data[q->tqp_index].queue_index = q->tqp_index; + ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; + } else { + ring_data[q->tqp_index + queue_num].ring = ring; + ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; + ring->io_base = q->io_base; + } + + hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); + + ring->tqp = q; + ring->desc = NULL; + ring->desc_cb = NULL; + ring->dev = priv->dev; + ring->desc_dma_addr = 0; + ring->buf_size = q->buf_size; + ring->desc_num = q->desc_num; + ring->next_to_use = 0; + ring->next_to_clean = 0; + + return 0; +} + +static int hns3_queue_to_ring(struct hnae3_queue *tqp, + struct hns3_nic_priv *priv) +{ + int ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); + if (ret) + return ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); + if (ret) + return ret; + + return 0; +} + +static int hns3_get_ring_config(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct pci_dev *pdev = h->pdev; + int i, ret; + + priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps * + sizeof(*priv->ring_data) * 2, + GFP_KERNEL); + if (!priv->ring_data) + return -ENOMEM; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); + if (ret) + goto err; + } + + return 0; +err: + devm_kfree(&pdev->dev, priv->ring_data); + return ret; +} + +static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) +{ + int ret; + + if (ring->desc_num <= 0 || ring->buf_size <= 0) + return -EINVAL; + + ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), + GFP_KERNEL); + if (!ring->desc_cb) { + ret = -ENOMEM; + goto out; + } + + ret = hns3_alloc_desc(ring); + if (ret) + goto out_with_desc_cb; + + if (!HNAE3_IS_TX_RING(ring)) { + ret = hns3_alloc_ring_buffers(ring); + if (ret) + goto out_with_desc; + } + + return 0; + +out_with_desc: + hns3_free_desc(ring); +out_with_desc_cb: + kfree(ring->desc_cb); + ring->desc_cb = NULL; +out: + return ret; +} + +static void hns3_fini_ring(struct hns3_enet_ring *ring) +{ + hns3_free_desc(ring); + kfree(ring->desc_cb); + ring->desc_cb = NULL; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +static int hns3_buf_size2type(u32 buf_size) +{ + int bd_size_type; + + switch (buf_size) { + case 512: + bd_size_type = HNS3_BD_SIZE_512_TYPE; + break; + case 1024: + bd_size_type = HNS3_BD_SIZE_1024_TYPE; + break; + case 2048: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + break; + case 4096: + bd_size_type = HNS3_BD_SIZE_4096_TYPE; + break; + default: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + } + + return bd_size_type; +} + +static void hns3_init_ring_hw(struct hns3_enet_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + struct hnae3_queue *q = ring->tqp; + + if (!HNAE3_IS_TX_RING(ring)) { + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + + } else { + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + 
hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + } +} + +int hns3_init_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + int ring_num = h->kinfo.num_tqps * 2; + int i, j; + int ret; + + for (i = 0; i < ring_num; i++) { + ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); + if (ret) { + dev_err(priv->dev, + "Alloc ring memory fail! ret=%d\n", ret); + goto out_when_alloc_ring_memory; + } + + hns3_init_ring_hw(priv->ring_data[i].ring); + + u64_stats_init(&priv->ring_data[i].ring->syncp); + } + + return 0; + +out_when_alloc_ring_memory: + for (j = i - 1; j >= 0; j--) + hns3_fini_ring(priv->ring_data[j].ring); + + return -ENOMEM; +} + +int hns3_uninit_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (h->ae_algo->ops->reset_queue) + h->ae_algo->ops->reset_queue(h, i); + + hns3_fini_ring(priv->ring_data[i].ring); + hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); + } + + return 0; +} + +/* Set mac addr if it is configured. or leave it to the AE driver */ +static void hns3_init_mac_addr(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u8 mac_addr_temp[ETH_ALEN]; + + if (h->ae_algo->ops->get_mac_addr) { + h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); + ether_addr_copy(netdev->dev_addr, mac_addr_temp); + } + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(netdev->dev_addr)) { + eth_hw_addr_random(netdev); + dev_warn(priv->dev, "using random MAC address %pM\n", + netdev->dev_addr); + } + + if (h->ae_algo->ops->set_mac_addr) + h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); + +} + +static void hns3_nic_set_priv_ops(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if ((netdev->features & NETIF_F_TSO) || + (netdev->features & NETIF_F_TSO6)) { + priv->ops.fill_desc = hns3_fill_desc_tso; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; + } else { + priv->ops.fill_desc = hns3_fill_desc; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; + } +} + +static int hns3_client_init(struct hnae3_handle *handle) +{ + struct pci_dev *pdev = handle->pdev; + struct hns3_nic_priv *priv; + struct net_device *netdev; + int ret; + + netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), + handle->kinfo.num_tqps); + if (!netdev) + return -ENOMEM; + + priv = netdev_priv(netdev); + priv->dev = &pdev->dev; + priv->netdev = netdev; + priv->ae_handle = handle; + priv->last_reset_time = jiffies; + priv->reset_level = HNAE3_FUNC_RESET; + priv->tx_timeout_count = 0; + + handle->kinfo.netdev = netdev; + handle->priv = (void *)priv; + + hns3_init_mac_addr(netdev); + + hns3_set_default_feature(netdev); + + netdev->watchdog_timeo = HNS3_TX_TIMEOUT; + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->netdev_ops = &hns3_nic_netdev_ops; + SET_NETDEV_DEV(netdev, &pdev->dev); + hns3_ethtool_set_ops(netdev); + hns3_nic_set_priv_ops(netdev); + + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = hns3_get_ring_config(priv); + if (ret) { + ret = -ENOMEM; + goto out_get_ring_cfg; + } + + ret = hns3_nic_init_vector_data(priv); + if (ret) { + ret = -ENOMEM; + goto out_init_vector_data; + } + + ret = hns3_init_all_ring(priv); + if (ret) { + ret = -ENOMEM; + goto 
out_init_ring_data; + } + + ret = register_netdev(netdev); + if (ret) { + dev_err(priv->dev, "probe register netdev fail!\n"); + goto out_reg_netdev_fail; + } + + hns3_dcbnl_setup(handle); + + /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ + netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + + return ret; + +out_reg_netdev_fail: +out_init_ring_data: + (void)hns3_nic_uninit_vector_data(priv); + priv->ring_data = NULL; +out_init_vector_data: +out_get_ring_cfg: + priv->ae_handle = NULL; + free_netdev(netdev); + return ret; +} + +static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + if (netdev->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(netdev); + + ret = hns3_nic_uninit_vector_data(priv); + if (ret) + netdev_err(netdev, "uninit vector error\n"); + + ret = hns3_uninit_all_ring(priv); + if (ret) + netdev_err(netdev, "uninit ring error\n"); + + priv->ring_data = NULL; + + free_netdev(netdev); +} + +static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) +{ + struct net_device *netdev = handle->kinfo.netdev; + + if (!netdev) + return; + + if (linkup) { + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + netdev_info(netdev, "link up\n"); + } else { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netdev_info(netdev, "link down\n"); + } +} + +static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct net_device *ndev = kinfo->netdev; + bool if_running; + int ret; + u8 i; + + if (tc > HNAE3_MAX_TC) + return -EINVAL; + + if (!ndev) + return -ENODEV; + + if_running = netif_running(ndev); + + ret = netdev_set_num_tc(ndev, tc); + if (ret) + return ret; + + if (if_running) { + (void)hns3_nic_net_stop(ndev); + msleep(100); + } + + ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? 
+ kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; + if (ret) + goto err_out; + + if (tc <= 1) { + netdev_reset_tc(ndev); + goto out; + } + + for (i = 0; i < HNAE3_MAX_TC; i++) { + struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; + + if (tc_info->enable) + netdev_set_tc_queue(ndev, + tc_info->tc, + tc_info->tqp_count, + tc_info->tqp_offset); + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + netdev_set_prio_tc_map(ndev, i, + kinfo->prio_tc[i]); + } + +out: + ret = hns3_nic_set_real_num_queue(ndev); + +err_out: + if (if_running) + (void)hns3_nic_net_open(ndev); + + return ret; +} + +static void hns3_recover_hw_addr(struct net_device *ndev) +{ + struct netdev_hw_addr_list *list; + struct netdev_hw_addr *ha, *tmp; + + /* go through and sync uc_addr entries to the device */ + list = &ndev->uc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + hns3_nic_uc_sync(ndev, ha->addr); + + /* go through and sync mc_addr entries to the device */ + list = &ndev->mc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + hns3_nic_mc_sync(ndev, ha->addr); +} + +static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); +} + +static void hns3_clear_all_ring(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + u32 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + struct netdev_queue *dev_queue; + struct hns3_enet_ring *ring; + + ring = priv->ring_data[i].ring; + hns3_clean_tx_ring(ring, ring->desc_num); + dev_queue = netdev_get_tx_queue(ndev, + priv->ring_data[i].queue_index); + netdev_tx_reset_queue(dev_queue); + + ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data); + } +} + +static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct net_device *ndev = kinfo->netdev; + + if (!netif_running(ndev)) + return -EIO; + + return hns3_nic_net_stop(ndev); +} + +static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); + int ret = 0; + + if (netif_running(kinfo->netdev)) { + ret = hns3_nic_net_up(kinfo->netdev); + if (ret) { + netdev_err(kinfo->netdev, + "hns net up fail, ret=%d!\n", ret); + return ret; + } + + priv->last_reset_time = jiffies; + } + + return ret; +} + +static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + priv->reset_level = 1; + hns3_init_mac_addr(netdev); + hns3_nic_set_rx_mode(netdev); + hns3_recover_hw_addr(netdev); + + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = hns3_get_ring_config(priv); + if (ret) + return ret; + + ret = hns3_nic_init_vector_data(priv); + if (ret) + return ret; + + ret = hns3_init_all_ring(priv); + if (ret) { + hns3_nic_uninit_vector_data(priv); + priv->ring_data = NULL; + } + + return ret; +} + +static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + hns3_clear_all_ring(handle); + + ret = hns3_nic_uninit_vector_data(priv); + if (ret) { + netdev_err(netdev, "uninit vector error\n"); + return ret; + } + + ret = hns3_uninit_all_ring(priv); + if 
(ret) + netdev_err(netdev, "uninit ring error\n"); + + priv->ring_data = NULL; + + return ret; +} + +static int hns3_reset_notify(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type) +{ + int ret = 0; + + switch (type) { + case HNAE3_UP_CLIENT: + ret = hns3_reset_notify_up_enet(handle); + break; + case HNAE3_DOWN_CLIENT: + ret = hns3_reset_notify_down_enet(handle); + break; + case HNAE3_INIT_CLIENT: + ret = hns3_reset_notify_init_enet(handle); + break; + case HNAE3_UNINIT_CLIENT: + ret = hns3_reset_notify_uninit_enet(handle); + break; + default: + break; + } + + return ret; +} + +static const struct hnae3_client_ops client_ops = { + .init_instance = hns3_client_init, + .uninit_instance = hns3_client_uninit, + .link_status_change = hns3_link_status_change, + .setup_tc = hns3_client_setup_tc, + .reset_notify = hns3_reset_notify, +}; + +/* hns3_init_module - Driver registration routine + * hns3_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init hns3_init_module(void) +{ + int ret; + + pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); + pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); + + client.type = HNAE3_CLIENT_KNIC; + snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", + hns3_driver_name); + + client.ops = &client_ops; + + ret = hnae3_register_client(&client); + if (ret) + return ret; + + ret = pci_register_driver(&hns3_driver); + if (ret) + hnae3_unregister_client(&client); + + return ret; +} +module_init(hns3_init_module); + +/* hns3_exit_module - Driver exit cleanup routine + * hns3_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit hns3_exit_module(void) +{ + pci_unregister_driver(&hns3_driver); + hnae3_unregister_client(&client); +} +module_exit(hns3_exit_module); + +MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("pci:hns-nic"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h new file mode 100644 index 000000000000..8a9de759957b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -0,0 +1,613 @@ +/* + * Copyright (c) 2016 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __HNS3_ENET_H +#define __HNS3_ENET_H + +#include "hnae3.h" + +extern const char hns3_driver_version[]; + +enum hns3_nic_state { + HNS3_NIC_STATE_TESTING, + HNS3_NIC_STATE_RESETTING, + HNS3_NIC_STATE_REINITING, + HNS3_NIC_STATE_DOWN, + HNS3_NIC_STATE_DISABLED, + HNS3_NIC_STATE_REMOVING, + HNS3_NIC_STATE_SERVICE_INITED, + HNS3_NIC_STATE_SERVICE_SCHED, + HNS3_NIC_STATE2_RESET_REQUESTED, + HNS3_NIC_STATE_MAX +}; + +#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000 +#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004 +#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008 +#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C +#define HNS3_RING_RX_RING_TAIL_REG 0x00018 +#define HNS3_RING_RX_RING_HEAD_REG 0x0001C +#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020 +#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C + +#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040 +#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044 +#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048 +#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C +#define HNS3_RING_TX_RING_TAIL_REG 0x00058 +#define HNS3_RING_TX_RING_HEAD_REG 0x0005C +#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 +#define HNS3_RING_TX_RING_OFFSET_REG 0x00064 +#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C + +#define HNS3_RING_PREFETCH_EN_REG 0x0007C +#define HNS3_RING_CFG_VF_NUM_REG 0x00080 +#define HNS3_RING_ASID_REG 0x0008C +#define HNS3_RING_RX_VM_REG 0x00090 +#define HNS3_RING_T0_BE_RST 0x00094 +#define HNS3_RING_COULD_BE_RST 0x00098 +#define HNS3_RING_WRR_WEIGHT_REG 0x0009c + +#define HNS3_RING_INTMSK_RXWL_REG 0x000A0 +#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4 +#define HNS3_RX_RING_INT_STS_REG 0x000A8 +#define HNS3_RING_INTMSK_TXWL_REG 0x000AC +#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0 +#define HNS3_TX_RING_INT_STS_REG 0x000B4 +#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8 +#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC +#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4 +#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8 + +#define HNS3_RING_MB_CTRL_REG 0x00100 +#define HNS3_RING_MB_DATA_BASE_REG 0x00200 + +#define HNS3_TX_REG_OFFSET 0x40 + +#define HNS3_RX_HEAD_SIZE 256 + +#define HNS3_TX_TIMEOUT (5 * HZ) +#define HNS3_RING_NAME_LEN 16 +#define HNS3_BUFFER_SIZE_2048 2048 +#define HNS3_RING_MAX_PENDING 32768 +#define HNS3_RING_MIN_PENDING 8 +#define HNS3_RING_BD_MULTIPLE 8 +#define HNS3_MAX_MTU 9728 + +#define HNS3_BD_SIZE_512_TYPE 0 +#define HNS3_BD_SIZE_1024_TYPE 1 +#define HNS3_BD_SIZE_2048_TYPE 2 +#define HNS3_BD_SIZE_4096_TYPE 3 + +#define HNS3_RX_FLAG_VLAN_PRESENT 0x1 +#define HNS3_RX_FLAG_L3ID_IPV4 0x0 +#define HNS3_RX_FLAG_L3ID_IPV6 0x1 +#define HNS3_RX_FLAG_L4ID_UDP 0x0 +#define HNS3_RX_FLAG_L4ID_TCP 0x1 + +#define HNS3_RXD_DMAC_S 0 +#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S) +#define HNS3_RXD_VLAN_S 2 +#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S) +#define HNS3_RXD_L3ID_S 4 +#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S) +#define HNS3_RXD_L4ID_S 8 +#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) +#define HNS3_RXD_FRAG_B 12 +#define HNS3_RXD_L2E_B 16 +#define HNS3_RXD_L3E_B 17 +#define HNS3_RXD_L4E_B 18 +#define HNS3_RXD_TRUNCAT_B 19 +#define HNS3_RXD_HOI_B 20 +#define HNS3_RXD_DOI_B 21 +#define HNS3_RXD_OL3E_B 22 +#define HNS3_RXD_OL4E_B 23 + +#define HNS3_RXD_ODMAC_S 0 +#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) +#define HNS3_RXD_OVLAN_S 2 +#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S) +#define HNS3_RXD_OL3ID_S 4 +#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S) +#define HNS3_RXD_OL4ID_S 8 +#define HNS3_RXD_OL4ID_M (0xf << 
HNS3_RXD_OL4ID_S) +#define HNS3_RXD_FBHI_S 12 +#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S) +#define HNS3_RXD_FBLI_S 14 +#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S) + +#define HNS3_RXD_BDTYPE_S 0 +#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S) +#define HNS3_RXD_VLD_B 4 +#define HNS3_RXD_UDP0_B 5 +#define HNS3_RXD_EXTEND_B 7 +#define HNS3_RXD_FE_B 8 +#define HNS3_RXD_LUM_B 9 +#define HNS3_RXD_CRCP_B 10 +#define HNS3_RXD_L3L4P_B 11 +#define HNS3_RXD_TSIND_S 12 +#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) +#define HNS3_RXD_LKBK_B 15 +#define HNS3_RXD_HDL_S 16 +#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S) +#define HNS3_RXD_HSIND_B 31 + +#define HNS3_TXD_L3T_S 0 +#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) +#define HNS3_TXD_L4T_S 2 +#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S) +#define HNS3_TXD_L3CS_B 4 +#define HNS3_TXD_L4CS_B 5 +#define HNS3_TXD_VLAN_B 6 +#define HNS3_TXD_TSO_B 7 + +#define HNS3_TXD_L2LEN_S 8 +#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S) +#define HNS3_TXD_L3LEN_S 16 +#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S) +#define HNS3_TXD_L4LEN_S 24 +#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S) + +#define HNS3_TXD_OL3T_S 0 +#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S) +#define HNS3_TXD_OVLAN_B 2 +#define HNS3_TXD_MACSEC_B 3 +#define HNS3_TXD_TUNTYPE_S 4 +#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S) + +#define HNS3_TXD_BDTYPE_S 0 +#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S) +#define HNS3_TXD_FE_B 4 +#define HNS3_TXD_SC_S 5 +#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S) +#define HNS3_TXD_EXTEND_B 7 +#define HNS3_TXD_VLD_B 8 +#define HNS3_TXD_RI_B 9 +#define HNS3_TXD_RA_B 10 +#define HNS3_TXD_TSYN_B 11 +#define HNS3_TXD_DECTTL_S 12 +#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) + +#define HNS3_TXD_MSS_S 0 +#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) + +#define HNS3_VECTOR_TX_IRQ BIT_ULL(0) +#define HNS3_VECTOR_RX_IRQ BIT_ULL(1) + +#define HNS3_VECTOR_NOT_INITED 0 +#define HNS3_VECTOR_INITED 1 + +#define HNS3_MAX_BD_SIZE 65535 +#define HNS3_MAX_BD_PER_FRAG 8 +#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS + +#define HNS3_VECTOR_GL0_OFFSET 0x100 +#define HNS3_VECTOR_GL1_OFFSET 0x200 +#define HNS3_VECTOR_GL2_OFFSET 0x300 +#define HNS3_VECTOR_RL_OFFSET 0x900 +#define HNS3_VECTOR_RL_EN_B 6 + +enum hns3_pkt_l3t_type { + HNS3_L3T_NONE, + HNS3_L3T_IPV6, + HNS3_L3T_IPV4, + HNS3_L3T_RESERVED +}; + +enum hns3_pkt_l4t_type { + HNS3_L4T_UNKNOWN, + HNS3_L4T_TCP, + HNS3_L4T_UDP, + HNS3_L4T_SCTP +}; + +enum hns3_pkt_ol3t_type { + HNS3_OL3T_NONE, + HNS3_OL3T_IPV6, + HNS3_OL3T_IPV4_NO_CSUM, + HNS3_OL3T_IPV4_CSUM +}; + +enum hns3_pkt_tun_type { + HNS3_TUN_NONE, + HNS3_TUN_MAC_IN_UDP, + HNS3_TUN_NVGRE, + HNS3_TUN_OTHER +}; + +/* hardware spec ring buffer format */ +struct __packed hns3_desc { + __le64 addr; + union { + struct { + __le16 vlan_tag; + __le16 send_size; + union { + __le32 type_cs_vlan_tso_len; + struct { + __u8 type_cs_vlan_tso; + __u8 l2_len; + __u8 l3_len; + __u8 l4_len; + }; + }; + __le16 outer_vlan_tag; + __le16 tv; + + union { + __le32 ol_type_vlan_len_msec; + struct { + __u8 ol_type_vlan_msec; + __u8 ol2_len; + __u8 ol3_len; + __u8 ol4_len; + }; + }; + + __le32 paylen; + __le16 bdtp_fe_sc_vld_ra_ri; + __le16 mss; + } tx; + + struct { + __le32 l234_info; + __le16 pkt_len; + __le16 size; + + __le32 rss_hash; + __le16 fd_id; + __le16 vlan_tag; + + union { + __le32 ol_info; + struct { + __le16 o_dm_vlan_id_fb; + __le16 ot_vlan_tag; + }; + }; + + __le32 bd_base_info; + } rx; + }; +}; + +struct hns3_desc_cb { + 
dma_addr_t dma; /* dma address of this desc */ + void *buf; /* cpu addr for a desc */ + + /* priv data for the desc, e.g. skb when use with ip stack*/ + void *priv; + u16 page_offset; + u16 reuse_flag; + + u16 length; /* length of the buffer */ + + /* desc type, used by the ring user to mark the type of the priv data */ + u16 type; +}; + +enum hns3_pkt_l3type { + HNS3_L3_TYPE_IPV4, + HNS3_L3_TYPE_IPV6, + HNS3_L3_TYPE_ARP, + HNS3_L3_TYPE_RARP, + HNS3_L3_TYPE_IPV4_OPT, + HNS3_L3_TYPE_IPV6_EXT, + HNS3_L3_TYPE_LLDP, + HNS3_L3_TYPE_BPDU, + HNS3_L3_TYPE_MAC_PAUSE, + HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ + + /* reserved for 0xA~0xB*/ + + HNS3_L3_TYPE_CNM = 0xc, + + /* reserved for 0xD~0xE*/ + + HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_l4type { + HNS3_L4_TYPE_UDP, + HNS3_L4_TYPE_TCP, + HNS3_L4_TYPE_GRE, + HNS3_L4_TYPE_SCTP, + HNS3_L4_TYPE_IGMP, + HNS3_L4_TYPE_ICMP, + + /* reserved for 0x6~0xE */ + + HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_ol3type { + HNS3_OL3_TYPE_IPV4 = 0, + HNS3_OL3_TYPE_IPV6, + /* reserved for 0x2~0x3 */ + HNS3_OL3_TYPE_IPV4_OPT = 4, + HNS3_OL3_TYPE_IPV6_EXT, + + /* reserved for 0x6~0xE*/ + + HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_ol4type { + HNS3_OL4_TYPE_NO_TUN, + HNS3_OL4_TYPE_MAC_IN_UDP, + HNS3_OL4_TYPE_NVGRE, + HNS3_OL4_TYPE_UNKNOWN +}; + +struct ring_stats { + u64 io_err_cnt; + u64 sw_err_cnt; + u64 seg_pkt_cnt; + union { + struct { + u64 tx_pkts; + u64 tx_bytes; + u64 tx_err_cnt; + u64 restart_queue; + u64 tx_busy; + }; + struct { + u64 rx_pkts; + u64 rx_bytes; + u64 rx_err_cnt; + u64 reuse_pg_cnt; + u64 err_pkt_len; + u64 non_vld_descs; + u64 err_bd_num; + u64 l2_err; + u64 l3l4_csum_err; + }; + }; +}; + +struct hns3_enet_ring { + u8 __iomem *io_base; /* base io address for the ring */ + struct hns3_desc *desc; /* dma map address space */ + struct hns3_desc_cb *desc_cb; + struct hns3_enet_ring *next; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_queue *tqp; + char ring_name[HNS3_RING_NAME_LEN]; + struct device *dev; /* will be used for DMA mapping of descriptors */ + + /* statistic */ + struct ring_stats stats; + struct u64_stats_sync syncp; + + dma_addr_t desc_dma_addr; + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 desc_num; /* total number of desc */ + u16 max_desc_num_per_pkt; + u16 max_raw_data_sz_per_desc; + u16 max_pkt_size; + int next_to_use; /* idx of next spare desc */ + + /* idx of lastest sent desc, the ring is empty when equal to + * next_to_use + */ + int next_to_clean; + + u32 flag; /* ring attribute */ + int irq_init_flag; + + int numa_node; + cpumask_t affinity_mask; +}; + +struct hns_queue; + +struct hns3_nic_ring_data { + struct hns3_enet_ring *ring; + struct napi_struct napi; + int queue_index; + int (*poll_one)(struct hns3_nic_ring_data *, int, void *); + void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *); + void (*fini_process)(struct hns3_nic_ring_data *); +}; + +struct hns3_nic_ops { + int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type); + int (*maybe_stop_tx)(struct sk_buff **out_skb, + int *bnum, struct hns3_enet_ring *ring); + void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); +}; + +enum hns3_flow_level_range { + HNS3_FLOW_LOW = 0, + HNS3_FLOW_MID = 1, + HNS3_FLOW_HIGH = 2, + HNS3_FLOW_ULTRA = 3, +}; + +enum hns3_link_mode_bits { + HNS3_LM_FIBRE_BIT = BIT(0), + HNS3_LM_AUTONEG_BIT = BIT(1), + HNS3_LM_TP_BIT = BIT(2), + HNS3_LM_PAUSE_BIT = 
BIT(3), + HNS3_LM_BACKPLANE_BIT = BIT(4), + HNS3_LM_10BASET_HALF_BIT = BIT(5), + HNS3_LM_10BASET_FULL_BIT = BIT(6), + HNS3_LM_100BASET_HALF_BIT = BIT(7), + HNS3_LM_100BASET_FULL_BIT = BIT(8), + HNS3_LM_1000BASET_FULL_BIT = BIT(9), + HNS3_LM_10000BASEKR_FULL_BIT = BIT(10), + HNS3_LM_25000BASEKR_FULL_BIT = BIT(11), + HNS3_LM_40000BASELR4_FULL_BIT = BIT(12), + HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13), + HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14), + HNS3_LM_COUNT = 15 +}; + +#define HNS3_INT_GL_50K 0x000A +#define HNS3_INT_GL_20K 0x0019 +#define HNS3_INT_GL_18K 0x001B +#define HNS3_INT_GL_8K 0x003E + +struct hns3_enet_ring_group { + /* list of rings, chained through ring->next */ + struct hns3_enet_ring *ring; + u64 total_bytes; /* total bytes processed this group */ + u64 total_packets; /* total packets processed this group */ + u16 count; + enum hns3_flow_level_range flow_level; + u16 int_gl; +}; + +struct hns3_enet_tqp_vector { + struct hnae3_handle *handle; + u8 __iomem *mask_addr; + int vector_irq; + int irq_init_flag; + + u16 idx; /* index in the TQP vector array per handle. */ + + struct napi_struct napi; + + struct hns3_enet_ring_group rx_group; + struct hns3_enet_ring_group tx_group; + + u16 num_tqps; /* total number of tqps in TQP vector */ + + cpumask_t affinity_mask; + char name[HNAE3_INT_NAME_LEN]; + + /* when 0 should adjust interrupt coalesce parameter */ + u8 int_adapt_down; +} ____cacheline_internodealigned_in_smp; + +enum hns3_udp_tnl_type { + HNS3_UDP_TNL_VXLAN, + HNS3_UDP_TNL_GENEVE, + HNS3_UDP_TNL_MAX, +}; + +struct hns3_udp_tunnel { + u16 dst_port; + int used; +}; + +struct hns3_nic_priv { + struct hnae3_handle *ae_handle; + u32 enet_ver; + u32 port_id; + struct net_device *netdev; + struct device *dev; + struct hns3_nic_ops ops; + + /* the cb for nic to manage the ring buffer; the first half of the + * array is for tx rings and the second half for rx rings + */ + struct hns3_nic_ring_data *ring_data; + struct hns3_enet_tqp_vector *tqp_vector; + u16 vector_num; + + /* The most recently read link state */ + int link; + u64 tx_timeout_count; + enum hnae3_reset_type reset_level; + unsigned long last_reset_time; + + unsigned long state; + + struct timer_list service_timer; + + struct work_struct service_task; + + struct notifier_block notifier_block; + /* Vxlan/Geneve information */ + struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; +}; + +union l3_hdr_info { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union l4_hdr_info { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +/* the distance between [begin, end) in a ring buffer + * note: there is an unused slot between the begin and the end + */ +static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end) +{ + return (end - begin + ring->desc_num) % ring->desc_num; +} + +static inline int ring_space(struct hns3_enet_ring *ring) +{ + return ring->desc_num - + ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; +} + +static inline int is_ring_empty(struct hns3_enet_ring *ring) +{ + return ring->next_to_use == ring->next_to_clean; +} + +static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + writel(value, reg_addr + reg); +} + +#define hns3_write_dev(a, reg, value) \ + hns3_write_reg((a)->io_base, (reg), (value)) + +#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ + (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) + +#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) + +#define 
ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \ + DMA_TO_DEVICE : DMA_FROM_DEVICE) + +#define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) + +#define hnae_buf_size(_ring) ((_ring)->buf_size) +#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring))) +#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring)) + +/* iterator for handling rings in ring group */ +#define hns3_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +#define hns3_get_handle(ndev) \ + (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle) + +void hns3_ethtool_set_ops(struct net_device *netdev); + +bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +int hns3_init_all_ring(struct hns3_nic_priv *priv); +int hns3_uninit_all_ring(struct hns3_nic_priv *priv); +netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +int hns3_clean_rx_ring( + struct hns3_enet_ring *ring, int budget, + void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); + +#ifdef CONFIG_HNS3_DCB +void hns3_dcbnl_setup(struct hnae3_handle *handle); +#else +static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {} +#endif + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c new file mode 100644 index 000000000000..65a69b439457 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -0,0 +1,896 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include + +#include "hns3_enet.h" + +struct hns3_stats { + char stats_string[ETH_GSTRING_LEN]; + int stats_size; + int stats_offset; +}; + +/* tqp related stats */ +#define HNS3_TQP_STAT(_string, _member) { \ + .stats_string = _string, \ + .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ + .stats_offset = offsetof(struct hns3_enet_ring, stats) + \ + offsetof(struct ring_stats, _member), \ +} + +static const struct hns3_stats hns3_txq_stats[] = { + /* Tx per-queue statistics */ + HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt), + HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("tx_pkts", tx_pkts), + HNS3_TQP_STAT("tx_bytes", tx_bytes), + HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt), + HNS3_TQP_STAT("tx_restart_queue", restart_queue), + HNS3_TQP_STAT("tx_busy", tx_busy), +}; + +#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) + +static const struct hns3_stats hns3_rxq_stats[] = { + /* Rx per-queue statistics */ + HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt), + HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("rx_pkts", rx_pkts), + HNS3_TQP_STAT("rx_bytes", rx_bytes), + HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt), + HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt), + HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len), + HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs), + HNS3_TQP_STAT("rx_err_bd_num", err_bd_num), + HNS3_TQP_STAT("rx_l2_err", l2_err), + HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err), +}; + +#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) + +#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) + +#define HNS3_SELF_TEST_TYPE_NUM 1 +#define HNS3_NIC_LB_TEST_PKT_NUM 1 +#define HNS3_NIC_LB_TEST_RING_ID 0 +#define 
HNS3_NIC_LB_TEST_PACKET_SIZE 128 + +/* NIC loopback test error codes */ +#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 +#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 +#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 + +struct hns3_link_mode_mapping { + u32 hns3_link_mode; + u32 ethtool_link_mode; +}; + +static const struct hns3_link_mode_mapping hns3_lm_map[] = { + {HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, + {HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, + {HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, + {HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, + {HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, + {HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT}, + {HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT}, + {HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT}, + {HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT}, + {HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, +}; + +static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + int ret; + + if (!h->ae_algo->ops->set_loopback || + !h->ae_algo->ops->set_promisc_mode) + return -EOPNOTSUPP; + + switch (loop) { + case HNAE3_MAC_INTER_LOOP_MAC: + ret = h->ae_algo->ops->set_loopback(h, loop, true); + break; + case HNAE3_MAC_LOOP_NONE: + ret = h->ae_algo->ops->set_loopback(h, + HNAE3_MAC_INTER_LOOP_MAC, false); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + if (ret) + return ret; + + if (loop == HNAE3_MAC_LOOP_NONE) + h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC); + else + h->ae_algo->ops->set_promisc_mode(h, 1); + + return ret; +} + +static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + int ret; + + if (!h->ae_algo->ops->start) + return -EOPNOTSUPP; + + ret = h->ae_algo->ops->start(h); + if (ret) { + netdev_err(ndev, + "hns3_lp_up: ae start returned error: %d\n", ret); + return ret; + } + + ret = hns3_lp_setup(ndev, loop_mode); + usleep_range(10000, 20000); + + return ret; +} + +static int hns3_lp_down(struct net_device *ndev) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + int ret; + + if (!h->ae_algo->ops->stop) + return -EOPNOTSUPP; + + ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE); + if (ret) { + netdev_err(ndev, "hns3_lp_setup returned error: %d\n", ret); + return ret; + } + + h->ae_algo->ops->stop(h); + usleep_range(10000, 20000); + + return 0; +} + +static void hns3_lp_setup_skb(struct sk_buff *skb) +{ + struct net_device *ndev = skb->dev; + unsigned char *packet; + struct ethhdr *ethh; + unsigned int i; + + skb_reserve(skb, NET_IP_ALIGN); + ethh = skb_put(skb, sizeof(struct ethhdr)); + packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); + + memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + eth_zero_addr(ethh->h_source); + ethh->h_proto = htons(ETH_P_ARP); + skb_reset_mac_header(skb); + + for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++) + packet[i] = (unsigned char)(i & 0xff); +} + +static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; + unsigned char *packet = skb->data; + u32 i; + + for (i = 0; i < skb->len; i++) + if (packet[i] != (unsigned char)(i & 0xff)) + break; + + /* The packet is correctly received */ + if (i == skb->len) + tqp_vector->rx_group.total_packets++; + else + print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, true); + + dev_kfree_skb_any(skb); +} + +static u32 
hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) +{ + struct hnae3_handle *h = priv->ae_handle; + struct hnae3_knic_private_info *kinfo; + u32 i, rcv_good_pkt_total = 0; + + kinfo = &h->kinfo; + for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) { + struct hns3_enet_ring *ring = priv->ring_data[i].ring; + struct hns3_enet_ring_group *rx_group; + u64 pre_rx_pkt; + + rx_group = &ring->tqp_vector->rx_group; + pre_rx_pkt = rx_group->total_packets; + + hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); + + rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); + rx_group->total_packets = pre_rx_pkt; + } + return rcv_good_pkt_total; +} + +static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, + u32 end_ringid, u32 budget) +{ + u32 i; + + for (i = start_ringid; i <= end_ringid; i++) { + struct hns3_enet_ring *ring = priv->ring_data[i].ring; + + hns3_clean_tx_ring(ring, budget); + } +} + +/** + * hns3_lp_run_test - run loopback test + * @ndev: net device + * @mode: loopback type + */ +static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct sk_buff *skb; + u32 i, good_cnt; + int ret_val = 0; + + skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN, + GFP_KERNEL); + if (!skb) + return HNS3_NIC_LB_TEST_NO_MEM_ERR; + + skb->dev = ndev; + hns3_lp_setup_skb(skb); + skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID; + + good_cnt = 0; + for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) { + netdev_tx_t tx_ret; + + skb_get(skb); + tx_ret = hns3_nic_net_xmit(skb, ndev); + if (tx_ret == NETDEV_TX_OK) + good_cnt++; + else + netdev_err(ndev, "hns3_lp_run_test xmit failed: %d\n", + tx_ret); + } + if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { + ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR; + netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n", + mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); + goto out; + } + + /* Allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM); + if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { + ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR; + netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n", + mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); + } + +out: + hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID, + HNS3_NIC_LB_TEST_RING_ID, + HNS3_NIC_LB_TEST_PKT_NUM); + + kfree_skb(skb); + return ret_val; +} + +/** + * hns3_self_test - self test + * @ndev: net device + * @eth_test: test cmd + * @data: test result + */ +static void hns3_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int st_param[HNS3_SELF_TEST_TYPE_NUM][2]; + bool if_running = netif_running(ndev); + int test_index = 0; + u32 i; + + /* Only do offline selftest, or pass by default */ + if (eth_test->flags != ETH_TEST_FL_OFFLINE) + return; + + st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC; + st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = + h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; + + if (if_running) + dev_close(ndev); + + set_bit(HNS3_NIC_STATE_TESTING, &priv->state); + + for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) { + enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; + + if (!st_param[i][1]) + continue; + + data[test_index] = hns3_lp_up(ndev, loop_type); + if (!data[test_index]) { + data[test_index] = hns3_lp_run_test(ndev, loop_type); + 
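/* tear the loopback down again regardless of the test result */ + 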
hns3_lp_down(ndev); + } + + if (data[test_index]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + test_index++; + } + + clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); + + if (if_running) + dev_open(ndev); +} + +static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd, + bool is_advertised) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) { + if (!(caps & hns3_lm_map[i].hns3_link_mode)) + continue; + + if (is_advertised) + __set_bit(hns3_lm_map[i].ethtool_link_mode, + cmd->link_modes.advertising); + else + __set_bit(hns3_lm_map[i].ethtool_link_mode, + cmd->link_modes.supported); + } +} + +static int hns3_get_sset_count(struct net_device *netdev, int stringset) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + + if (!ops->get_sset_count) + return -EOPNOTSUPP; + + switch (stringset) { + case ETH_SS_STATS: + return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + + ops->get_sset_count(h, stringset)); + + case ETH_SS_TEST: + return ops->get_sset_count(h, stringset); + } + + return 0; +} + +static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, + u32 stat_count, u32 num_tqps) +{ +#define MAX_PREFIX_SIZE (8 + 4) + u32 size_left; + u32 i, j; + u32 n1; + + for (i = 0; i < num_tqps; i++) { + for (j = 0; j < stat_count; j++) { + data[ETH_GSTRING_LEN - 1] = '\0'; + + /* first, prepend the prefix string */ + n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i); + n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); + size_left = (ETH_GSTRING_LEN - 1) - n1; + + /* now, concatenate the stats string to it */ + strncat(data, stats[j].stats_string, size_left); + data += ETH_GSTRING_LEN; + } + } + + return data; +} + +static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + + /* get strings for Tx */ + data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, + kinfo->num_tqps); + + /* get strings for Rx */ + data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, + kinfo->num_tqps); + + return data; +} + +static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + char *buff = (char *)data; + + if (!ops->get_strings) + return; + + switch (stringset) { + case ETH_SS_STATS: + buff = hns3_get_strings_tqps(h, buff); + h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); + break; + case ETH_SS_TEST: + ops->get_strings(h, stringset, data); + break; + } +} + +static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) +{ + struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_enet_ring *ring; + u8 *stat; + u32 i, j; + + /* get stats for Tx */ + for (i = 0; i < kinfo->num_tqps; i++) { + ring = nic_priv->ring_data[i].ring; + for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; + *data++ = *(u64 *)stat; + } + } + + /* get stats for Rx */ + for (i = 0; i < kinfo->num_tqps; i++) { + ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; + for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; + *data++ = *(u64 *)stat; + } + } + + return data; +} + +/* hns3_get_stats - get detailed statistics. + * @netdev: net device + * @stats: statistics info. + * @data: statistics data. 
+ */ +static void hns3_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u64 *p = data; + + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { + netdev_err(netdev, "could not get any statistics\n"); + return; + } + + h->ae_algo->ops->update_stats(h, &netdev->stats); + + /* get per-queue stats */ + p = hns3_get_stats_tqps(h, p); + + /* get MAC & other misc hardware stats */ + h->ae_algo->ops->get_stats(h, p); +} + +static void hns3_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + strncpy(drvinfo->version, hns3_driver_version, + sizeof(drvinfo->version)); + drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; + + strncpy(drvinfo->driver, h->pdev->driver->name, + sizeof(drvinfo->driver)); + drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; + + strncpy(drvinfo->bus_info, pci_name(h->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", + priv->ae_handle->ae_algo->ops->get_fw_version(h)); +} + +static u32 hns3_get_link(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status) + return h->ae_algo->ops->get_status(h); + else + return 0; +} + +static void hns3_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int queue_num = h->kinfo.num_tqps; + + param->tx_max_pending = HNS3_RING_MAX_PENDING; + param->rx_max_pending = HNS3_RING_MAX_PENDING; + + param->tx_pending = priv->ring_data[0].ring->desc_num; + param->rx_pending = priv->ring_data[queue_num].ring->desc_num; +} + +static void hns3_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *param) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) + h->ae_algo->ops->get_pauseparam(h, ¶m->autoneg, + ¶m->rx_pause, ¶m->tx_pause); +} + +static int hns3_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u32 supported_caps; + u32 advertised_caps; + u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; + u8 link_stat; + + if (!h->ae_algo || !h->ae_algo->ops) + return -EOPNOTSUPP; + + /* 1.auto_neg & speed & duplex from cmd */ + if (netdev->phydev) + phy_ethtool_ksettings_get(netdev->phydev, cmd); + else if (h->ae_algo->ops->get_ksettings_an_result) + h->ae_algo->ops->get_ksettings_an_result(h, + &cmd->base.autoneg, + &cmd->base.speed, + &cmd->base.duplex); + else + return -EOPNOTSUPP; + + link_stat = hns3_get_link(netdev); + if (!link_stat) { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + /* 2.media_type get from bios parameter block */ + if (h->ae_algo->ops->get_media_type) { + h->ae_algo->ops->get_media_type(h, &media_type); + + switch (media_type) { + case HNAE3_MEDIA_TYPE_FIBER: + cmd->base.port = PORT_FIBRE; + supported_caps = HNS3_LM_FIBRE_BIT | + HNS3_LM_AUTONEG_BIT | + HNS3_LM_PAUSE_BIT | + HNS3_LM_1000BASET_FULL_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_COPPER: + cmd->base.port = PORT_TP; + supported_caps = HNS3_LM_TP_BIT | + HNS3_LM_AUTONEG_BIT | + 
HNS3_LM_PAUSE_BIT | + HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | + HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | + HNS3_LM_10BASET_HALF_BIT; + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_BACKPLANE: + cmd->base.port = PORT_NONE; + supported_caps = HNS3_LM_BACKPLANE_BIT | + HNS3_LM_PAUSE_BIT | + HNS3_LM_AUTONEG_BIT | + HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | + HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | + HNS3_LM_10BASET_HALF_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_UNKNOWN: + default: + cmd->base.port = PORT_OTHER; + supported_caps = 0; + advertised_caps = 0; + break; + } + + if (!cmd->base.autoneg) + advertised_caps &= ~HNS3_LM_AUTONEG_BIT; + + /* now, map driver link modes to ethtool link modes */ + hns3_driv_to_eth_caps(supported_caps, cmd, false); + hns3_driv_to_eth_caps(advertised_caps, cmd, true); + } + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (h->ae_algo->ops->get_mdix_mode) + h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); + /* 4.mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; + + return 0; +} + +static int hns3_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + /* Only support ksettings_set for netdev with phy attached for now */ + if (netdev->phydev) + return phy_ethtool_ksettings_set(netdev->phydev, cmd); + + return -EOPNOTSUPP; +} + +static u32 hns3_get_rss_key_size(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || + !h->ae_algo->ops->get_rss_key_size) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss_key_size(h); +} + +static u32 hns3_get_rss_indir_size(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || + !h->ae_algo->ops->get_rss_indir_size) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss_indir_size(h); +} + +static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss(h, indir, key, hfunc); +} + +static int hns3_set_rss(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) + return -EOPNOTSUPP; + + /* currently we only support Toeplitz hash */ + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { + netdev_err(netdev, + "hash func not supported (only Toeplitz hash)\n"); + return -EOPNOTSUPP; + } + if (!indir) { + netdev_err(netdev, + "set rss failed for indir is empty\n"); + return -EOPNOTSUPP; + } + + return h->ae_algo->ops->set_rss(h, indir, key, hfunc); +} + +static int hns3_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple) + return -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = h->kinfo.num_tc * h->kinfo.rss_size; + break; + case ETHTOOL_GRXFH: + return h->ae_algo->ops->get_rss_tuple(h, cmd); + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, + u32 
new_desc_num) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + h->kinfo.num_desc = new_desc_num; + + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + priv->ring_data[i].ring->desc_num = new_desc_num; + + return hns3_init_all_ring(priv); +} + +static int hns3_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + bool if_running = netif_running(ndev); + u32 old_desc_num, new_desc_num; + int ret; + + if (param->rx_mini_pending || param->rx_jumbo_pending) + return -EINVAL; + + if (param->tx_pending != param->rx_pending) { + netdev_err(ndev, + "Descriptors of tx and rx must be equal"); + return -EINVAL; + } + + if (param->tx_pending > HNS3_RING_MAX_PENDING || + param->tx_pending < HNS3_RING_MIN_PENDING) { + netdev_err(ndev, + "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n", + param->tx_pending, HNS3_RING_MIN_PENDING, + HNS3_RING_MAX_PENDING); + return -EINVAL; + } + + new_desc_num = param->tx_pending; + + /* Hardware requires that its descriptors must be multiple of eight */ + new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE); + old_desc_num = h->kinfo.num_desc; + if (old_desc_num == new_desc_num) + return 0; + + netdev_info(ndev, + "Changing descriptor count from %d to %d.\n", + old_desc_num, new_desc_num); + + if (if_running) + dev_close(ndev); + + ret = hns3_uninit_all_ring(priv); + if (ret) + return ret; + + ret = hns3_change_all_ring_bd_num(priv, new_desc_num); + if (ret) { + ret = hns3_change_all_ring_bd_num(priv, old_desc_num); + if (ret) { + netdev_err(ndev, + "Revert to old bd num fail, ret=%d.\n", ret); + return ret; + } + } + + if (if_running) + ret = dev_open(ndev); + + return ret; +} + +static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple) + return -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return h->ae_algo->ops->set_rss_tuple(h, cmd); + default: + return -EOPNOTSUPP; + } +} + +static int hns3_nway_reset(struct net_device *netdev) +{ + struct phy_device *phy = netdev->phydev; + + if (!netif_running(netdev)) + return 0; + + /* Only support nway_reset for netdev with phy attached for now */ + if (!phy) + return -EOPNOTSUPP; + + if (phy->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + return genphy_restart_aneg(phy); +} + +static const struct ethtool_ops hns3vf_ethtool_ops = { + .get_drvinfo = hns3_get_drvinfo, + .get_ringparam = hns3_get_ringparam, + .set_ringparam = hns3_set_ringparam, + .get_strings = hns3_get_strings, + .get_ethtool_stats = hns3_get_stats, + .get_sset_count = hns3_get_sset_count, + .get_rxnfc = hns3_get_rxnfc, + .get_rxfh_key_size = hns3_get_rss_key_size, + .get_rxfh_indir_size = hns3_get_rss_indir_size, + .get_rxfh = hns3_get_rss, + .set_rxfh = hns3_set_rss, + .get_link_ksettings = hns3_get_link_ksettings, +}; + +static const struct ethtool_ops hns3_ethtool_ops = { + .self_test = hns3_self_test, + .get_drvinfo = hns3_get_drvinfo, + .get_link = hns3_get_link, + .get_ringparam = hns3_get_ringparam, + .set_ringparam = hns3_set_ringparam, + .get_pauseparam = hns3_get_pauseparam, + .get_strings = hns3_get_strings, + .get_ethtool_stats = hns3_get_stats, + .get_sset_count = hns3_get_sset_count, + .get_rxnfc = hns3_get_rxnfc, + .set_rxnfc = hns3_set_rxnfc, + .get_rxfh_key_size = hns3_get_rss_key_size, + .get_rxfh_indir_size = hns3_get_rss_indir_size, + 
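/* the RSS get/set helpers below are the same ones used by hns3vf_ethtool_ops */ + 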
.get_rxfh = hns3_get_rss, + .set_rxfh = hns3_set_rss, + .get_link_ksettings = hns3_get_link_ksettings, + .set_link_ksettings = hns3_set_link_ksettings, + .nway_reset = hns3_nway_reset, +}; + +void hns3_ethtool_set_ops(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->flags & HNAE3_SUPPORT_VF) + netdev->ethtool_ops = &hns3vf_ethtool_ops; + else + netdev->ethtool_ops = &hns3_ethtool_ops; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index d2b20d01a58c..d077fa02b66e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -8,8 +8,3 @@ obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o - -obj-$(CONFIG_HNS3_ENET) += hns3.o -hns3-objs = hns3_enet.o hns3_ethtool.o - -hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c deleted file mode 100644 index 925619a7c50a..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include "hnae3.h" -#include "hns3_enet.h" - -static -int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->ieee_getets) - return h->kinfo.dcb_ops->ieee_getets(h, ets); - - return -EOPNOTSUPP; -} - -static -int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->ieee_setets) - return h->kinfo.dcb_ops->ieee_setets(h, ets); - - return -EOPNOTSUPP; -} - -static -int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->ieee_getpfc) - return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); - - return -EOPNOTSUPP; -} - -static -int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->ieee_setpfc) - return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); - - return -EOPNOTSUPP; -} - -/* DCBX configuration */ -static u8 hns3_dcbnl_getdcbx(struct net_device *ndev) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->getdcbx) - return h->kinfo.dcb_ops->getdcbx(h); - - return 0; -} - -/* return 0 if successful, otherwise fail */ -static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->setdcbx) - return h->kinfo.dcb_ops->setdcbx(h, mode); - - return 1; -} - -static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = { - .ieee_getets = hns3_dcbnl_ieee_getets, - .ieee_setets = hns3_dcbnl_ieee_setets, - .ieee_getpfc = hns3_dcbnl_ieee_getpfc, - .ieee_setpfc = hns3_dcbnl_ieee_setpfc, - .getdcbx = hns3_dcbnl_getdcbx, - .setdcbx = hns3_dcbnl_setdcbx, -}; - -/* hclge_dcbnl_setup - DCBNL setup - * @handle: the corresponding vport handle - * Set up DCBNL - */ -void hns3_dcbnl_setup(struct 
hnae3_handle *handle) -{ - struct net_device *dev = handle->kinfo.netdev; - - if (!handle->kinfo.dcb_ops) - return; - - dev->dcbnl_ops = &hns3_dcbnl_ops; -} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c deleted file mode 100644 index 59415090ff0f..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ /dev/null @@ -1,3214 +0,0 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hnae3.h" -#include "hns3_enet.h" - -static const char hns3_driver_name[] = "hns3"; -const char hns3_driver_version[] = VERMAGIC_STRING; -static const char hns3_driver_string[] = - "Hisilicon Ethernet Network Driver for Hip08 Family"; -static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; -static struct hnae3_client client; - -/* hns3_pci_tbl - PCI Device ID Table - * - * Last entry must be all 0s - * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, - * Class, Class Mask, private data (not used) } - */ -static const struct pci_device_id hns3_pci_tbl[] = { - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), - HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), - HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), - HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), - HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), - HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - /* required last entry */ - {0, } -}; -MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); - -static irqreturn_t hns3_irq_handle(int irq, void *dev) -{ - struct hns3_enet_tqp_vector *tqp_vector = dev; - - napi_schedule(&tqp_vector->napi); - - return IRQ_HANDLED; -} - -static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) -{ - struct hns3_enet_tqp_vector *tqp_vectors; - unsigned int i; - - for (i = 0; i < priv->vector_num; i++) { - tqp_vectors = &priv->tqp_vector[i]; - - if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) - continue; - - /* release the irq resource */ - free_irq(tqp_vectors->vector_irq, tqp_vectors); - tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; - } -} - -static int hns3_nic_init_irq(struct hns3_nic_priv *priv) -{ - struct hns3_enet_tqp_vector *tqp_vectors; - int txrx_int_idx = 0; - int rx_int_idx = 0; - int tx_int_idx = 0; - unsigned int i; - int ret; - - for (i = 0; i < priv->vector_num; i++) { - tqp_vectors = &priv->tqp_vector[i]; - - if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) - continue; - - if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", priv->netdev->name, "TxRx", - txrx_int_idx++); - txrx_int_idx++; - } else if (tqp_vectors->rx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", priv->netdev->name, "Rx", - rx_int_idx++); - } else if (tqp_vectors->tx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", 
priv->netdev->name, "Tx", - tx_int_idx++); - } else { - /* Skip this unused q_vector */ - continue; - } - - tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; - - ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, - tqp_vectors->name, - tqp_vectors); - if (ret) { - netdev_err(priv->netdev, "request irq(%d) fail\n", - tqp_vectors->vector_irq); - return ret; - } - - tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; - } - - return 0; -} - -static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, - u32 mask_en) -{ - writel(mask_en, tqp_vector->mask_addr); -} - -static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) -{ - napi_enable(&tqp_vector->napi); - - /* enable vector */ - hns3_mask_vector_irq(tqp_vector, 1); -} - -static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) -{ - /* disable vector */ - hns3_mask_vector_irq(tqp_vector, 0); - - disable_irq(tqp_vector->vector_irq); - napi_disable(&tqp_vector->napi); -} - -static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector, - u32 gl_value) -{ - /* this defines the configuration for GL (Interrupt Gap Limiter) - * GL defines inter interrupt gap. - * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing - */ - writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); - writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); - writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET); -} - -static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector, - u32 rl_value) -{ - /* this defines the configuration for RL (Interrupt Rate Limiter). - * Rl defines rate of interrupts i.e. number of interrupts-per-second - * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing - */ - writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); -} - -static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector) -{ - /* initialize the configuration for interrupt coalescing. - * 1. GL (Interrupt Gap Limiter) - * 2. RL (Interrupt Rate Limiter) - */ - - /* Default :enable interrupt coalesce */ - tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; - tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K; - hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K); - /* for now we are disabling Interrupt RL - we - * will re-enable later - */ - hns3_set_vector_coalesc_rl(tqp_vector, 0); - tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW; - tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; -} - -static int hns3_nic_set_real_num_queue(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - struct hnae3_knic_private_info *kinfo = &h->kinfo; - unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; - int ret; - - ret = netif_set_real_num_tx_queues(netdev, queue_size); - if (ret) { - netdev_err(netdev, - "netif_set_real_num_tx_queues fail, ret=%d!\n", - ret); - return ret; - } - - ret = netif_set_real_num_rx_queues(netdev, queue_size); - if (ret) { - netdev_err(netdev, - "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); - return ret; - } - - return 0; -} - -static int hns3_nic_net_up(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; - int i, j; - int ret; - - /* get irq resource for all vectors */ - ret = hns3_nic_init_irq(priv); - if (ret) { - netdev_err(netdev, "hns init irq failed! 
ret=%d\n", ret); - return ret; - } - - /* enable the vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_enable(&priv->tqp_vector[i]); - - /* start the ae_dev */ - ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; - if (ret) - goto out_start_err; - - return 0; - -out_start_err: - for (j = i - 1; j >= 0; j--) - hns3_vector_disable(&priv->tqp_vector[j]); - - hns3_nic_uninit_irq(priv); - - return ret; -} - -static int hns3_nic_net_open(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; - - netif_carrier_off(netdev); - - ret = hns3_nic_set_real_num_queue(netdev); - if (ret) - return ret; - - ret = hns3_nic_net_up(netdev); - if (ret) { - netdev_err(netdev, - "hns net up fail, ret=%d!\n", ret); - return ret; - } - - priv->last_reset_time = jiffies; - return 0; -} - -static void hns3_nic_net_down(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - const struct hnae3_ae_ops *ops; - int i; - - /* stop ae_dev */ - ops = priv->ae_handle->ae_algo->ops; - if (ops->stop) - ops->stop(priv->ae_handle); - - /* disable vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - - /* free irq resources */ - hns3_nic_uninit_irq(priv); -} - -static int hns3_nic_net_stop(struct net_device *netdev) -{ - netif_tx_stop_all_queues(netdev); - netif_carrier_off(netdev); - - hns3_nic_net_down(netdev); - - return 0; -} - -static int hns3_nic_uc_sync(struct net_device *netdev, - const unsigned char *addr) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->add_uc_addr) - return h->ae_algo->ops->add_uc_addr(h, addr); - - return 0; -} - -static int hns3_nic_uc_unsync(struct net_device *netdev, - const unsigned char *addr) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->rm_uc_addr) - return h->ae_algo->ops->rm_uc_addr(h, addr); - - return 0; -} - -static int hns3_nic_mc_sync(struct net_device *netdev, - const unsigned char *addr) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->add_mc_addr) - return h->ae_algo->ops->add_mc_addr(h, addr); - - return 0; -} - -static int hns3_nic_mc_unsync(struct net_device *netdev, - const unsigned char *addr) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->rm_mc_addr) - return h->ae_algo->ops->rm_mc_addr(h, addr); - - return 0; -} - -static void hns3_nic_set_rx_mode(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->set_promisc_mode) { - if (netdev->flags & IFF_PROMISC) - h->ae_algo->ops->set_promisc_mode(h, 1); - else - h->ae_algo->ops->set_promisc_mode(h, 0); - } - if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) - netdev_err(netdev, "sync uc address fail\n"); - if (netdev->flags & IFF_MULTICAST) - if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) - netdev_err(netdev, "sync mc address fail\n"); -} - -static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, - u16 *mss, u32 *type_cs_vlan_tso) -{ - u32 l4_offset, hdr_len; - union l3_hdr_info l3; - union l4_hdr_info l4; - u32 l4_paylen; - int ret; - - if (!skb_is_gso(skb)) - return 0; - - ret = skb_cow_head(skb, 0); - if (ret) - return ret; - - l3.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - - /* Software should clear the IPv4's checksum field when tso is - * needed. 
- */ - if (l3.v4->version == 4) - l3.v4->check = 0; - - /* tunnel packet.*/ - if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | - SKB_GSO_GRE_CSUM | - SKB_GSO_UDP_TUNNEL | - SKB_GSO_UDP_TUNNEL_CSUM)) { - if ((!(skb_shinfo(skb)->gso_type & - SKB_GSO_PARTIAL)) && - (skb_shinfo(skb)->gso_type & - SKB_GSO_UDP_TUNNEL_CSUM)) { - /* Software should clear the udp's checksum - * field when tso is needed. - */ - l4.udp->check = 0; - } - /* reset l3&l4 pointers from outer to inner headers */ - l3.hdr = skb_inner_network_header(skb); - l4.hdr = skb_inner_transport_header(skb); - - /* Software should clear the IPv4's checksum field when - * tso is needed. - */ - if (l3.v4->version == 4) - l3.v4->check = 0; - } - - /* normal or tunnel packet*/ - l4_offset = l4.hdr - skb->data; - hdr_len = (l4.tcp->doff * 4) + l4_offset; - - /* remove payload length from inner pseudo checksum when tso*/ - l4_paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, - (__force __wsum)htonl(l4_paylen)); - - /* find the txbd field values */ - *paylen = skb->len - hdr_len; - hnae_set_bit(*type_cs_vlan_tso, - HNS3_TXD_TSO_B, 1); - - /* get MSS for TSO */ - *mss = skb_shinfo(skb)->gso_size; - - return 0; -} - -static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, - u8 *il4_proto) -{ - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; - unsigned char *l4_hdr; - unsigned char *exthdr; - u8 l4_proto_tmp; - __be16 frag_off; - - /* find outer header point */ - l3.hdr = skb_network_header(skb); - l4_hdr = skb_inner_transport_header(skb); - - if (skb->protocol == htons(ETH_P_IPV6)) { - exthdr = l3.hdr + sizeof(*l3.v6); - l4_proto_tmp = l3.v6->nexthdr; - if (l4_hdr != exthdr) - ipv6_skip_exthdr(skb, exthdr - skb->data, - &l4_proto_tmp, &frag_off); - } else if (skb->protocol == htons(ETH_P_IP)) { - l4_proto_tmp = l3.v4->protocol; - } else { - return -EINVAL; - } - - *ol4_proto = l4_proto_tmp; - - /* tunnel packet */ - if (!skb->encapsulation) { - *il4_proto = 0; - return 0; - } - - /* find inner header point */ - l3.hdr = skb_inner_network_header(skb); - l4_hdr = skb_inner_transport_header(skb); - - if (l3.v6->version == 6) { - exthdr = l3.hdr + sizeof(*l3.v6); - l4_proto_tmp = l3.v6->nexthdr; - if (l4_hdr != exthdr) - ipv6_skip_exthdr(skb, exthdr - skb->data, - &l4_proto_tmp, &frag_off); - } else if (l3.v4->version == 4) { - l4_proto_tmp = l3.v4->protocol; - } - - *il4_proto = l4_proto_tmp; - - return 0; -} - -static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) -{ - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; - union { - struct tcphdr *tcp; - struct udphdr *udp; - struct gre_base_hdr *gre; - unsigned char *hdr; - } l4; - unsigned char *l2_hdr; - u8 l4_proto = ol4_proto; - u32 ol2_len; - u32 ol3_len; - u32 ol4_len; - u32 l2_len; - u32 l3_len; - - l3.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - - /* compute L2 header size for normal packet, defined in 2 Bytes */ - l2_len = l3.hdr - skb->data; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); - - /* tunnel packet*/ - if (skb->encapsulation) { - /* compute OL2 header size, defined in 2 Bytes */ - ol2_len = l2_len; - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, ol2_len >> 1); - - /* compute OL3 header size, defined in 4 Bytes */ - ol3_len = l4.hdr - l3.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, - 
HNS3_TXD_L3LEN_S, ol3_len >> 2); - - /* MAC in UDP, MAC in GRE (0x6558)*/ - if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { - /* switch MAC header ptr from outer to inner header.*/ - l2_hdr = skb_inner_mac_header(skb); - - /* compute OL4 header size, defined in 4 Bytes. */ - ol4_len = l2_hdr - l4.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, ol4_len >> 2); - - /* switch IP header ptr from outer to inner header */ - l3.hdr = skb_inner_network_header(skb); - - /* compute inner l2 header size, defined in 2 Bytes. */ - l2_len = l3.hdr - l2_hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); - } else { - /* skb packet types not supported by hardware, - * txbd len fild doesn't be filled. - */ - return; - } - - /* switch L4 header pointer from outer to inner */ - l4.hdr = skb_inner_transport_header(skb); - - l4_proto = il4_proto; - } - - /* compute inner(/normal) L3 header size, defined in 4 Bytes */ - l3_len = l4.hdr - l3.hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, l3_len >> 2); - - /* compute inner(/normal) L4 header size, defined in 4 Bytes */ - switch (l4_proto) { - case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, l4.tcp->doff); - break; - case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); - break; - case IPPROTO_UDP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); - break; - default: - /* skb packet types not supported by hardware, - * txbd len fild doesn't be filled. - */ - return; - } -} - -static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) -{ - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; - u32 l4_proto = ol4_proto; - - l3.hdr = skb_network_header(skb); - - /* define OL3 type and tunnel type(OL4).*/ - if (skb->encapsulation) { - /* define outer network header type.*/ - if (skb->protocol == htons(ETH_P_IP)) { - if (skb_is_gso(skb)) - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); - else - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); - - } else if (skb->protocol == htons(ETH_P_IPV6)) { - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); - } - - /* define tunnel type(OL4).*/ - switch (l4_proto) { - case IPPROTO_UDP: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); - break; - case IPPROTO_GRE: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); - break; - default: - /* drop the skb tunnel packet if hardware don't support, - * because hardware can't calculate csum when TSO. - */ - if (skb_is_gso(skb)) - return -EDOM; - - /* the stack computes the IP header already, - * driver calculate l4 checksum when not TSO. - */ - skb_checksum_help(skb); - return 0; - } - - l3.hdr = skb_inner_network_header(skb); - l4_proto = il4_proto; - } - - if (l3.v4->version == 4) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); - - /* the stack computes the IP header already, the only time we - * need the hardware to recompute it is in the case of TSO. 
- */ - if (skb_is_gso(skb)) - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); - - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - } else if (l3.v6->version == 6) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - } - - switch (l4_proto) { - case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_TCP); - break; - case IPPROTO_UDP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_UDP); - break; - case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); - break; - default: - /* drop the skb tunnel packet if hardware don't support, - * because hardware can't calculate csum when TSO. - */ - if (skb_is_gso(skb)) - return -EDOM; - - /* the stack computes the IP header already, - * driver calculate l4 checksum when not TSO. - */ - skb_checksum_help(skb); - return 0; - } - - return 0; -} - -static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) -{ - /* Config bd buffer end */ - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_M, 0); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); -} - -static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type) -{ - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; - struct hns3_desc *desc = &ring->desc[ring->next_to_use]; - u32 ol_type_vlan_len_msec = 0; - u16 bdtp_fe_sc_vld_ra_ri = 0; - u32 type_cs_vlan_tso = 0; - struct sk_buff *skb; - u32 paylen = 0; - u16 mss = 0; - __be16 protocol; - u8 ol4_proto; - u8 il4_proto; - int ret; - - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ - desc_cb->priv = priv; - desc_cb->length = size; - desc_cb->dma = dma; - desc_cb->type = type; - - /* now, fill the descriptor */ - desc->addr = cpu_to_le64(dma); - desc->tx.send_size = cpu_to_le16((u16)size); - hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); - desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); - - if (type == DESC_TYPE_SKB) { - skb = (struct sk_buff *)priv; - paylen = skb->len; - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - skb_reset_mac_len(skb); - protocol = skb->protocol; - - /* vlan packet*/ - if (protocol == htons(ETH_P_8021Q)) { - protocol = vlan_get_protocol(skb); - skb->protocol = protocol; - } - ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (ret) - return ret; - hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - if (ret) - return ret; - - ret = hns3_set_tso(skb, &paylen, &mss, - &type_cs_vlan_tso); - if (ret) - return ret; - } - - /* Set txbd */ - desc->tx.ol_type_vlan_len_msec = - cpu_to_le32(ol_type_vlan_len_msec); - desc->tx.type_cs_vlan_tso_len = - cpu_to_le32(type_cs_vlan_tso); - desc->tx.paylen = cpu_to_le32(paylen); - desc->tx.mss = cpu_to_le16(mss); - } - - /* move ring pointer to next.*/ - ring_ptr_move_fw(ring, next_to_use); - - return 0; -} - -static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type) -{ - unsigned int frag_buf_num; - unsigned int k; - 
int sizeoflast; - int ret; - - frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - sizeoflast = size % HNS3_MAX_BD_SIZE; - sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; - - /* When the frag size is bigger than hardware, split this frag */ - for (k = 0; k < frag_buf_num; k++) { - ret = hns3_fill_desc(ring, priv, - (k == frag_buf_num - 1) ? - sizeoflast : HNS3_MAX_BD_SIZE, - dma + HNS3_MAX_BD_SIZE * k, - frag_end && (k == frag_buf_num - 1) ? 1 : 0, - (type == DESC_TYPE_SKB && !k) ? - DESC_TYPE_SKB : DESC_TYPE_PAGE); - if (ret) - return ret; - } - - return 0; -} - -static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, - struct hns3_enet_ring *ring) -{ - struct sk_buff *skb = *out_skb; - struct skb_frag_struct *frag; - int bdnum_for_frag; - int frag_num; - int buf_num; - int size; - int i; - - size = skb_headlen(skb); - buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - - frag_num = skb_shinfo(skb)->nr_frags; - for (i = 0; i < frag_num; i++) { - frag = &skb_shinfo(skb)->frags[i]; - size = skb_frag_size(frag); - bdnum_for_frag = - (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) - return -ENOMEM; - - buf_num += bdnum_for_frag; - } - - if (buf_num > ring_space(ring)) - return -EBUSY; - - *bnum = buf_num; - return 0; -} - -static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, - struct hns3_enet_ring *ring) -{ - struct sk_buff *skb = *out_skb; - int buf_num; - - /* No. of segments (plus a header) */ - buf_num = skb_shinfo(skb)->nr_frags + 1; - - if (buf_num > ring_space(ring)) - return -EBUSY; - - *bnum = buf_num; - - return 0; -} - -static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) -{ - struct device *dev = ring_to_dev(ring); - unsigned int i; - - for (i = 0; i < ring->desc_num; i++) { - /* check if this is where we started */ - if (ring->next_to_use == next_to_use_orig) - break; - - /* unmap the descriptor dma address */ - if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) - dma_unmap_single(dev, - ring->desc_cb[ring->next_to_use].dma, - ring->desc_cb[ring->next_to_use].length, - DMA_TO_DEVICE); - else - dma_unmap_page(dev, - ring->desc_cb[ring->next_to_use].dma, - ring->desc_cb[ring->next_to_use].length, - DMA_TO_DEVICE); - - /* rollback one */ - ring_ptr_move_bw(ring, next_to_use); - } -} - -netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_nic_ring_data *ring_data = - &tx_ring_data(priv, skb->queue_mapping); - struct hns3_enet_ring *ring = ring_data->ring; - struct device *dev = priv->dev; - struct netdev_queue *dev_queue; - struct skb_frag_struct *frag; - int next_to_use_head; - int next_to_use_frag; - dma_addr_t dma; - int buf_num; - int seg_num; - int size; - int ret; - int i; - - /* Prefetch the data used later */ - prefetch(skb->data); - - switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { - case -EBUSY: - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); - - goto out_net_tx_busy; - case -ENOMEM: - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - netdev_err(netdev, "no memory to xmit!\n"); - - goto out_err_tx_ok; - default: - break; - } - - /* No. 
of segments (plus a header) */ - seg_num = skb_shinfo(skb)->nr_frags + 1; - /* Fill the first part */ - size = skb_headlen(skb); - - next_to_use_head = ring->next_to_use; - - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) { - netdev_err(netdev, "TX head DMA map failed\n"); - ring->stats.sw_err_cnt++; - goto out_err_tx_ok; - } - - ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, - DESC_TYPE_SKB); - if (ret) - goto head_dma_map_err; - - next_to_use_frag = ring->next_to_use; - /* Fill the fragments */ - for (i = 1; i < seg_num; i++) { - frag = &skb_shinfo(skb)->frags[i - 1]; - size = skb_frag_size(frag); - dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) { - netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); - ring->stats.sw_err_cnt++; - goto frag_dma_map_err; - } - ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, - seg_num - 1 == i ? 1 : 0, - DESC_TYPE_PAGE); - - if (ret) - goto frag_dma_map_err; - } - - /* Complete translate all packets */ - dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); - netdev_tx_sent_queue(dev_queue, skb->len); - - wmb(); /* Commit all data before submit */ - - hnae_queue_xmit(ring->tqp, buf_num); - - return NETDEV_TX_OK; - -frag_dma_map_err: - hns_nic_dma_unmap(ring, next_to_use_frag); - -head_dma_map_err: - hns_nic_dma_unmap(ring, next_to_use_head); - -out_err_tx_ok: - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - -out_net_tx_busy: - netif_stop_subqueue(netdev, ring_data->queue_index); - smp_mb(); /* Commit all data before submit */ - - return NETDEV_TX_BUSY; -} - -static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - struct sockaddr *mac_addr = p; - int ret; - - if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) - return -EADDRNOTAVAIL; - - ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data); - if (ret) { - netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); - return ret; - } - - ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); - - return 0; -} - -static int hns3_nic_set_features(struct net_device *netdev, - netdev_features_t features) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } - - netdev->features = features; - return 0; -} - -static void -hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - int queue_num = priv->ae_handle->kinfo.num_tqps; - struct hns3_enet_ring *ring; - unsigned int start; - unsigned int idx; - u64 tx_bytes = 0; - u64 rx_bytes = 0; - u64 tx_pkts = 0; - u64 rx_pkts = 0; - - for (idx = 0; idx < queue_num; idx++) { - /* fetch the tx stats */ - ring = priv->ring_data[idx].ring; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - tx_bytes += ring->stats.tx_bytes; - tx_pkts += ring->stats.tx_pkts; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - - /* fetch the rx stats */ - ring = priv->ring_data[idx + queue_num].ring; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - rx_bytes += ring->stats.rx_bytes; - rx_pkts += ring->stats.rx_pkts; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - } - - 
stats->tx_bytes = tx_bytes; - stats->tx_packets = tx_pkts; - stats->rx_bytes = rx_bytes; - stats->rx_packets = rx_pkts; - - stats->rx_errors = netdev->stats.rx_errors; - stats->multicast = netdev->stats.multicast; - stats->rx_length_errors = netdev->stats.rx_length_errors; - stats->rx_crc_errors = netdev->stats.rx_crc_errors; - stats->rx_missed_errors = netdev->stats.rx_missed_errors; - - stats->tx_errors = netdev->stats.tx_errors; - stats->rx_dropped = netdev->stats.rx_dropped; - stats->tx_dropped = netdev->stats.tx_dropped; - stats->collisions = netdev->stats.collisions; - stats->rx_over_errors = netdev->stats.rx_over_errors; - stats->rx_frame_errors = netdev->stats.rx_frame_errors; - stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; - stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; - stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; - stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; - stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; - stats->tx_window_errors = netdev->stats.tx_window_errors; - stats->rx_compressed = netdev->stats.rx_compressed; - stats->tx_compressed = netdev->stats.tx_compressed; -} - -static void hns3_add_tunnel_port(struct net_device *netdev, u16 port, - enum hns3_udp_tnl_type type) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; - struct hnae3_handle *h = priv->ae_handle; - - if (udp_tnl->used && udp_tnl->dst_port == port) { - udp_tnl->used++; - return; - } - - if (udp_tnl->used) { - netdev_warn(netdev, - "UDP tunnel [%d], port [%d] offload\n", type, port); - return; - } - - udp_tnl->dst_port = port; - udp_tnl->used = 1; - /* TBD send command to hardware to add port */ - if (h->ae_algo->ops->add_tunnel_udp) - h->ae_algo->ops->add_tunnel_udp(h, port); -} - -static void hns3_del_tunnel_port(struct net_device *netdev, u16 port, - enum hns3_udp_tnl_type type) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; - struct hnae3_handle *h = priv->ae_handle; - - if (!udp_tnl->used || udp_tnl->dst_port != port) { - netdev_warn(netdev, - "Invalid UDP tunnel port %d\n", port); - return; - } - - udp_tnl->used--; - if (udp_tnl->used) - return; - - udp_tnl->dst_port = 0; - /* TBD send command to hardware to del port */ - if (h->ae_algo->ops->del_tunnel_udp) - h->ae_algo->ops->del_tunnel_udp(h, port); -} - -/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports - * @netdev: This physical ports's netdev - * @ti: Tunnel information - */ -static void hns3_nic_udp_tunnel_add(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - u16 port_n = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); - break; - case UDP_TUNNEL_TYPE_GENEVE: - hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); - break; - default: - netdev_err(netdev, "unsupported tunnel type %d\n", ti->type); - break; - } -} - -static void hns3_nic_udp_tunnel_del(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - u16 port_n = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); - break; - case UDP_TUNNEL_TYPE_GENEVE: - hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); - break; - default: - break; - } -} - -static int hns3_setup_tc(struct net_device *netdev, void *type_data) -{ - struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; - struct hnae3_handle 
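/* On hns3_add/del_tunnel_port further up: each tunnel type owns one
 * {dst_port, used} slot and "used" refcounts duplicate requests, so
 * hardware is programmed only on the 0 -> 1 transition in add and the
 * 1 -> 0 transition in del. A second, different port of the same type
 * only triggers the warning and is not offloaded.
 */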
*h = hns3_get_handle(netdev); - struct hnae3_knic_private_info *kinfo = &h->kinfo; - u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; - u8 tc = mqprio_qopt->qopt.num_tc; - u16 mode = mqprio_qopt->mode; - u8 hw = mqprio_qopt->qopt.hw; - bool if_running; - unsigned int i; - int ret; - - if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && - mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) - return -EOPNOTSUPP; - - if (tc > HNAE3_MAX_TC) - return -EINVAL; - - if (!netdev) - return -EINVAL; - - if_running = netif_running(netdev); - if (if_running) { - hns3_nic_net_stop(netdev); - msleep(100); - } - - ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? - kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; - if (ret) - goto out; - - if (tc <= 1) { - netdev_reset_tc(netdev); - } else { - ret = netdev_set_num_tc(netdev, tc); - if (ret) - goto out; - - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (!kinfo->tc_info[i].enable) - continue; - - netdev_set_tc_queue(netdev, - kinfo->tc_info[i].tc, - kinfo->tc_info[i].tqp_count, - kinfo->tc_info[i].tqp_offset); - } - } - - ret = hns3_nic_set_real_num_queue(netdev); - -out: - if (if_running) - hns3_nic_net_open(netdev); - - return ret; -} - -static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) -{ - if (type != TC_SETUP_QDISC_MQPRIO) - return -EOPNOTSUPP; - - return hns3_setup_tc(dev, type_data); -} - -static int hns3_vlan_rx_add_vid(struct net_device *netdev, - __be16 proto, u16 vid) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - int ret = -EIO; - - if (h->ae_algo->ops->set_vlan_filter) - ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); - - return ret; -} - -static int hns3_vlan_rx_kill_vid(struct net_device *netdev, - __be16 proto, u16 vid) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - int ret = -EIO; - - if (h->ae_algo->ops->set_vlan_filter) - ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); - - return ret; -} - -static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, - u8 qos, __be16 vlan_proto) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - int ret = -EIO; - - if (h->ae_algo->ops->set_vf_vlan_filter) - ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, - qos, vlan_proto); - - return ret; -} - -static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - bool if_running = netif_running(netdev); - int ret; - - if (!h->ae_algo->ops->set_mtu) - return -EOPNOTSUPP; - - /* if this was called with netdev up then bring netdevice down */ - if (if_running) { - (void)hns3_nic_net_stop(netdev); - msleep(100); - } - - ret = h->ae_algo->ops->set_mtu(h, new_mtu); - if (ret) { - netdev_err(netdev, "failed to change MTU in hardware %d\n", - ret); - return ret; - } - - /* if the netdev was running earlier, bring it up again */ - if (if_running && hns3_nic_net_open(netdev)) - ret = -EINVAL; - - return ret; -} - -static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) -{ - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hns3_enet_ring *tx_ring = NULL; - int timeout_queue = 0; - int hw_head, hw_tail; - int i; - - /* Find the stopped queue the same way the stack does */ - for (i = 0; i < ndev->real_num_tx_queues; i++) { - struct netdev_queue *q; - unsigned long trans_start; - - q = netdev_get_tx_queue(ndev, i); - trans_start = q->trans_start; - if (netif_xmit_stopped(q) && - time_after(jiffies, - (trans_start + ndev->watchdog_timeo))) { - timeout_queue = i; - break; - } - } - 
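/* The scan above mirrors the stack's own watchdog check: a queue has
 * timed out when it is stopped and jiffies has passed trans_start +
 * watchdog_timeo. Note the loop runs to real_num_tx_queues while the
 * miss test just below compares i against num_tx_queues; the two only
 * match while the queue count has not been reduced at runtime.
 */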
-	if (i == ndev->num_tx_queues) {
-		netdev_info(ndev,
-			    "no netdev TX timeout queue found, timeout count: %llu\n",
-			    priv->tx_timeout_count);
-		return false;
-	}
-
-	tx_ring = priv->ring_data[timeout_queue].ring;
-
-	hw_head = readl_relaxed(tx_ring->tqp->io_base +
-				HNS3_RING_TX_RING_HEAD_REG);
-	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
-				HNS3_RING_TX_RING_TAIL_REG);
-	netdev_info(ndev,
-		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
-		    priv->tx_timeout_count,
-		    timeout_queue,
-		    tx_ring->next_to_use,
-		    tx_ring->next_to_clean,
-		    hw_head,
-		    hw_tail,
-		    readl(tx_ring->tqp_vector->mask_addr));
-
-	return true;
-}
-
-static void hns3_nic_net_timeout(struct net_device *ndev)
-{
-	struct hns3_nic_priv *priv = netdev_priv(ndev);
-	unsigned long last_reset_time = priv->last_reset_time;
-	struct hnae3_handle *h = priv->ae_handle;
-
-	if (!hns3_get_tx_timeo_queue_info(ndev))
-		return;
-
-	priv->tx_timeout_count++;
-
-	/* This timeout is far away enough from last timeout,
-	 * if timeout again,set the reset type to PF reset
-	 */
-	if (time_after(jiffies, (last_reset_time + 20 * HZ)))
-		priv->reset_level = HNAE3_FUNC_RESET;
-
-	/* Don't do any new action before the next timeout */
-	else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo)))
-		return;
-
-	priv->last_reset_time = jiffies;
-
-	if (h->ae_algo->ops->reset_event)
-		h->ae_algo->ops->reset_event(h, priv->reset_level);
-
-	priv->reset_level++;
-	if (priv->reset_level > HNAE3_GLOBAL_RESET)
-		priv->reset_level = HNAE3_GLOBAL_RESET;
-}
-
-static const struct net_device_ops hns3_nic_netdev_ops = {
-	.ndo_open		= hns3_nic_net_open,
-	.ndo_stop		= hns3_nic_net_stop,
-	.ndo_start_xmit		= hns3_nic_net_xmit,
-	.ndo_tx_timeout		= hns3_nic_net_timeout,
-	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
-	.ndo_change_mtu		= hns3_nic_change_mtu,
-	.ndo_set_features	= hns3_nic_set_features,
-	.ndo_get_stats64	= hns3_nic_get_stats64,
-	.ndo_setup_tc		= hns3_nic_setup_tc,
-	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
-	.ndo_udp_tunnel_add	= hns3_nic_udp_tunnel_add,
-	.ndo_udp_tunnel_del	= hns3_nic_udp_tunnel_del,
-	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
-	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
-};
-
-/* hns3_probe - Device initialization routine
- * @pdev: PCI device information struct
- * @ent: entry in hns3_pci_tbl
- *
- * hns3_probe initializes a PF identified by a pci_dev structure.
- * The OS initialization, configuring of the PF private structure,
- * and a hardware reset occur.
- * - * Returns 0 on success, negative on failure - */ -static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct hnae3_ae_dev *ae_dev; - int ret; - - ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), - GFP_KERNEL); - if (!ae_dev) { - ret = -ENOMEM; - return ret; - } - - ae_dev->pdev = pdev; - ae_dev->flag = ent->driver_data; - ae_dev->dev_type = HNAE3_DEV_KNIC; - pci_set_drvdata(pdev, ae_dev); - - return hnae3_register_ae_dev(ae_dev); -} - -/* hns3_remove - Device removal routine - * @pdev: PCI device information struct - */ -static void hns3_remove(struct pci_dev *pdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); - - hnae3_unregister_ae_dev(ae_dev); - - devm_kfree(&pdev->dev, ae_dev); - - pci_set_drvdata(pdev, NULL); -} - -static struct pci_driver hns3_driver = { - .name = hns3_driver_name, - .id_table = hns3_pci_tbl, - .probe = hns3_probe, - .remove = hns3_remove, -}; - -/* set default feature to hns3 */ -static void hns3_set_default_feature(struct net_device *netdev) -{ - netdev->priv_flags |= IFF_UNICAST_FLT; - - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; - - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; - - netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->vlan_features |= - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | - NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; -} - -static int hns3_alloc_buffer(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) -{ - unsigned int order = hnae_page_order(ring); - struct page *p; - - p = dev_alloc_pages(order); - if (!p) - return -ENOMEM; - - cb->priv = p; - cb->page_offset = 0; - cb->reuse_flag = 0; - cb->buf = page_address(p); - cb->length = hnae_page_size(ring); - cb->type = DESC_TYPE_PAGE; - - return 0; -} - -static void hns3_free_buffer(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) -{ - if (cb->type == DESC_TYPE_SKB) - dev_kfree_skb_any((struct sk_buff *)cb->priv); - else if (!HNAE3_IS_TX_RING(ring)) - put_page((struct page *)cb->priv); - memset(cb, 0, sizeof(*cb)); -} - -static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) -{ - cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, - cb->length, ring_to_dma_dir(ring)); - - if (dma_mapping_error(ring_to_dev(ring), cb->dma)) - return -EIO; - - return 0; -} - -static void hns3_unmap_buffer(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) -{ - if (cb->type == DESC_TYPE_SKB) - dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, - ring_to_dma_dir(ring)); - else - dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, - ring_to_dma_dir(ring)); -} - -static void 
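/* hns3_alloc_buffer() and hns3_map_buffer() above deliberately keep
 * page allocation and DMA mapping separate, so hns3_reserve_buffer_map()
 * further down can unwind precisely: a failed mapping frees the page,
 * a failed allocation touches nothing.
 */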
hns3_buffer_detach(struct hns3_enet_ring *ring, int i) -{ - hns3_unmap_buffer(ring, &ring->desc_cb[i]); - ring->desc[i].addr = 0; -} - -static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) -{ - struct hns3_desc_cb *cb = &ring->desc_cb[i]; - - if (!ring->desc_cb[i].dma) - return; - - hns3_buffer_detach(ring, i); - hns3_free_buffer(ring, cb); -} - -static void hns3_free_buffers(struct hns3_enet_ring *ring) -{ - int i; - - for (i = 0; i < ring->desc_num; i++) - hns3_free_buffer_detach(ring, i); -} - -/* free desc along with its attached buffer */ -static void hns3_free_desc(struct hns3_enet_ring *ring) -{ - hns3_free_buffers(ring); - - dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - DMA_BIDIRECTIONAL); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; -} - -static int hns3_alloc_desc(struct hns3_enet_ring *ring) -{ - int size = ring->desc_num * sizeof(ring->desc[0]); - - ring->desc = kzalloc(size, GFP_KERNEL); - if (!ring->desc) - return -ENOMEM; - - ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - - return 0; -} - -static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) -{ - int ret; - - ret = hns3_alloc_buffer(ring, cb); - if (ret) - goto out; - - ret = hns3_map_buffer(ring, cb); - if (ret) - goto out_with_buf; - - return 0; - -out_with_buf: - hns3_free_buffer(ring, cb); -out: - return ret; -} - -static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) -{ - int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); - - if (ret) - return ret; - - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); - - return 0; -} - -/* Allocate memory for raw pkg, and map with dma */ -static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) -{ - int i, j, ret; - - for (i = 0; i < ring->desc_num; i++) { - ret = hns3_alloc_buffer_attach(ring, i); - if (ret) - goto out_buffer_fail; - } - - return 0; - -out_buffer_fail: - for (j = i - 1; j >= 0; j--) - hns3_free_buffer_detach(ring, j); - return ret; -} - -/* detach a in-used buffer and replace with a reserved one */ -static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, - struct hns3_desc_cb *res_cb) -{ - hns3_unmap_buffer(ring, &ring->desc_cb[i]); - ring->desc_cb[i] = *res_cb; - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); -} - -static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) -{ - ring->desc_cb[i].reuse_flag = 0; - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma - + ring->desc_cb[i].page_offset); -} - -static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, - int *pkts) -{ - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; - - (*pkts) += (desc_cb->type == DESC_TYPE_SKB); - (*bytes) += desc_cb->length; - /* desc_cb will be cleaned, after hnae_free_buffer_detach*/ - hns3_free_buffer_detach(ring, ring->next_to_clean); - - ring_ptr_move_fw(ring, next_to_clean); -} - -static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) -{ - int u = ring->next_to_use; - int c = ring->next_to_clean; - - if (unlikely(h > ring->desc_num)) - return 0; - - return u > c ? 
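/* Completing the check below: with desc_num slots, the descriptors
 * still owned by hardware are those in (next_to_clean, next_to_use],
 * walking forward modulo desc_num, and a reported head h is sane only
 * inside that interval. Example with desc_num = 8, c = 6, u = 2:
 * in-flight = {7, 0, 1, 2}, so h = 7 or h = 1 passes while h = 4 is
 * rejected (u > c is false, so the (h > c || h <= u) arm applies).
 */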
(h > c && h <= u) : (h > c || h <= u); -} - -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) -{ - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - struct netdev_queue *dev_queue; - int bytes, pkts; - int head; - - head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); - rmb(); /* Make sure head is ready before touch any data */ - - if (is_ring_empty(ring) || head == ring->next_to_clean) - return true; /* no data to poll */ - - if (!is_valid_clean_head(ring, head)) { - netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, - ring->next_to_use, ring->next_to_clean); - - u64_stats_update_begin(&ring->syncp); - ring->stats.io_err_cnt++; - u64_stats_update_end(&ring->syncp); - return true; - } - - bytes = 0; - pkts = 0; - while (head != ring->next_to_clean && budget) { - hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); - /* Issue prefetch for next Tx descriptor */ - prefetch(&ring->desc_cb[ring->next_to_clean]); - budget--; - } - - ring->tqp_vector->tx_group.total_bytes += bytes; - ring->tqp_vector->tx_group.total_packets += pkts; - - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_bytes += bytes; - ring->stats.tx_pkts += pkts; - u64_stats_update_end(&ring->syncp); - - dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); - netdev_tx_completed_queue(dev_queue, pkts, bytes); - - if (unlikely(pkts && netif_carrier_ok(netdev) && - (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. - */ - smp_mb(); - if (netif_tx_queue_stopped(dev_queue)) { - netif_tx_wake_queue(dev_queue); - ring->stats.restart_queue++; - } - } - - return !!budget; -} - -static int hns3_desc_unused(struct hns3_enet_ring *ring) -{ - int ntc = ring->next_to_clean; - int ntu = ring->next_to_use; - - return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; -} - -static void -hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) -{ - struct hns3_desc_cb *desc_cb; - struct hns3_desc_cb res_cbs; - int i, ret; - - for (i = 0; i < cleand_count; i++) { - desc_cb = &ring->desc_cb[ring->next_to_use]; - if (desc_cb->reuse_flag) { - u64_stats_update_begin(&ring->syncp); - ring->stats.reuse_pg_cnt++; - u64_stats_update_end(&ring->syncp); - - hns3_reuse_buffer(ring, ring->next_to_use); - } else { - ret = hns3_reserve_buffer_map(ring, &res_cbs); - if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - - netdev_err(ring->tqp->handle->kinfo.netdev, - "hnae reserve buffer map failed.\n"); - break; - } - hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); - } - - ring_ptr_move_fw(ring, next_to_use); - } - - wmb(); /* Make all data has been write before submit */ - writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); -} - -/* hns3_nic_get_headlen - determine size of header for LRO/GRO - * @data: pointer to the start of the headers - * @max: total length of section to find headers in - * - * This function is meant to determine the length of headers that will - * be recognized by hardware for LRO, GRO, and RSC offloads. The main - * motivation of doing this is to only perform one pull for IPv4 TCP - * packets so that we can do basic things like calculating the gso_size - * based on the average data per packet. 
- */ -static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag, - unsigned int max_size) -{ - unsigned char *network; - u8 hlen; - - /* This should never happen, but better safe than sorry */ - if (max_size < ETH_HLEN) - return max_size; - - /* Initialize network frame pointer */ - network = data; - - /* Set first protocol and move network header forward */ - network += ETH_HLEN; - - /* Handle any vlan tag if present */ - if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S) - == HNS3_RX_FLAG_VLAN_PRESENT) { - if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) - return max_size; - - network += VLAN_HLEN; - } - - /* Handle L3 protocols */ - if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) - == HNS3_RX_FLAG_L3ID_IPV4) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct iphdr))) - return max_size; - - /* Access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (network[0] & 0x0F) << 2; - - /* Verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct iphdr)) - return network - data; - - /* Record next protocol if header is present */ - } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) - == HNS3_RX_FLAG_L3ID_IPV6) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct ipv6hdr))) - return max_size; - - /* Record next protocol */ - hlen = sizeof(struct ipv6hdr); - } else { - return network - data; - } - - /* Relocate pointer to start of L4 header */ - network += hlen; - - /* Finally sort out TCP/UDP */ - if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) - == HNS3_RX_FLAG_L4ID_TCP) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct tcphdr))) - return max_size; - - /* Access doff as a u8 to avoid unaligned access on ia64 */ - hlen = (network[12] & 0xF0) >> 2; - - /* Verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct tcphdr)) - return network - data; - - network += hlen; - } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) - == HNS3_RX_FLAG_L4ID_UDP) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct udphdr))) - return max_size; - - network += sizeof(struct udphdr); - } - - /* If everything has gone correctly network should be the - * data section of the packet and will be the end of the header. - * If not then it probably represents the end of the last recognized - * header. 
- */ - if ((typeof(max_size))(network - data) < max_size) - return network - data; - else - return max_size; -} - -static void hns3_nic_reuse_page(struct sk_buff *skb, int i, - struct hns3_enet_ring *ring, int pull_len, - struct hns3_desc_cb *desc_cb) -{ - struct hns3_desc *desc; - int truesize, size; - int last_offset; - bool twobufs; - - twobufs = ((PAGE_SIZE < 8192) && - hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); - - desc = &ring->desc[ring->next_to_clean]; - size = le16_to_cpu(desc->rx.size); - - if (twobufs) { - truesize = hnae_buf_size(ring); - } else { - truesize = ALIGN(size, L1_CACHE_BYTES); - last_offset = hnae_page_size(ring) - hnae_buf_size(ring); - } - - skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, - size - pull_len, truesize - pull_len); - - /* Avoid re-using remote pages,flag default unreuse */ - if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) - return; - - if (twobufs) { - /* If we are only owner of page we can reuse it */ - if (likely(page_count(desc_cb->priv) == 1)) { - /* Flip page offset to other buffer */ - desc_cb->page_offset ^= truesize; - - desc_cb->reuse_flag = 1; - /* bump ref count on page before it is given*/ - get_page(desc_cb->priv); - } - return; - } - - /* Move offset up to the next cache line */ - desc_cb->page_offset += truesize; - - if (desc_cb->page_offset <= last_offset) { - desc_cb->reuse_flag = 1; - /* Bump ref count on page before it is given*/ - get_page(desc_cb->priv); - } -} - -static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, - struct hns3_desc *desc) -{ - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - int l3_type, l4_type; - u32 bd_base_info; - int ol4_type; - u32 l234info; - - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); - - skb->ip_summed = CHECKSUM_NONE; - - skb_checksum_none_assert(skb); - - if (!(netdev->features & NETIF_F_RXCSUM)) - return; - - /* check if hardware has done checksum */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) - return; - - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || - hnae_get_bit(l234info, HNS3_RXD_L4E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { - netdev_err(netdev, "L3/L4 error pkt\n"); - u64_stats_update_begin(&ring->syncp); - ring->stats.l3l4_csum_err++; - u64_stats_update_end(&ring->syncp); - - return; - } - - l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, - HNS3_RXD_L3ID_S); - l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, - HNS3_RXD_L4ID_S); - - ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); - switch (ol4_type) { - case HNS3_OL4_TYPE_MAC_IN_UDP: - case HNS3_OL4_TYPE_NVGRE: - skb->csum_level = 1; - case HNS3_OL4_TYPE_NO_TUN: - /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ - if (l3_type == HNS3_L3_TYPE_IPV4 || - (l3_type == HNS3_L3_TYPE_IPV6 && - (l4_type == HNS3_L4_TYPE_UDP || - l4_type == HNS3_L4_TYPE_TCP || - l4_type == HNS3_L4_TYPE_SCTP))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } -} - -static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) -{ - napi_gro_receive(&ring->tqp_vector->napi, skb); -} - -static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, - struct sk_buff **out_skb, int *out_bnum) -{ - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - struct hns3_desc_cb *desc_cb; - struct hns3_desc *desc; - struct sk_buff *skb; - unsigned char *va; - u32 bd_base_info; - int pull_len; - u32 l234info; - int length; 
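/* In hns3_nic_reuse_page() above, the two-buffers-per-page case (4K
 * page, 2K buffers) flips ownership between the page halves with an
 * XOR:
 *	page_offset = 0    -> 0 ^ 2048    = 2048  (use second half next)
 *	page_offset = 2048 -> 2048 ^ 2048 = 0     (back to first half)
 * The flip is only done while the driver holds the sole reference
 * (page_count() == 1), hence the get_page() taken before the current
 * half is handed to the stack.
 */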
- int bnum; - - desc = &ring->desc[ring->next_to_clean]; - desc_cb = &ring->desc_cb[ring->next_to_clean]; - - prefetch(desc); - - length = le16_to_cpu(desc->rx.pkt_len); - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); - - /* Check valid BD */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) - return -EFAULT; - - va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; - - /* Prefetch first cache line of first page - * Idea is to cache few bytes of the header of the packet. Our L1 Cache - * line size is 64B so need to prefetch twice to make it 128B. But in - * actual we can have greater size of caches with 128B Level 1 cache - * lines. In such a case, single fetch would suffice to cache in the - * relevant part of the header. - */ - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif - - skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, - HNS3_RX_HEAD_SIZE); - if (unlikely(!skb)) { - netdev_err(netdev, "alloc rx skb fail\n"); - - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - - return -ENOMEM; - } - - prefetchw(skb->data); - - bnum = 1; - if (length <= HNS3_RX_HEAD_SIZE) { - memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); - - /* We can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) - desc_cb->reuse_flag = 1; - else /* This page cannot be reused so discard it */ - put_page(desc_cb->priv); - - ring_ptr_move_fw(ring, next_to_clean); - } else { - u64_stats_update_begin(&ring->syncp); - ring->stats.seg_pkt_cnt++; - u64_stats_update_end(&ring->syncp); - - pull_len = hns3_nic_get_headlen(va, l234info, - HNS3_RX_HEAD_SIZE); - memcpy(__skb_put(skb, pull_len), va, - ALIGN(pull_len, sizeof(long))); - - hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); - - while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { - desc = &ring->desc[ring->next_to_clean]; - desc_cb = &ring->desc_cb[ring->next_to_clean]; - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); - bnum++; - } - } - - *out_bnum = bnum; - - if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { - netdev_err(netdev, "no valid bd,%016llx,%016llx\n", - ((u64 *)desc)[0], ((u64 *)desc)[1]); - u64_stats_update_begin(&ring->syncp); - ring->stats.non_vld_descs++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EINVAL; - } - - if (unlikely((!desc->rx.pkt_len) || - hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { - netdev_err(netdev, "truncated pkt\n"); - u64_stats_update_begin(&ring->syncp); - ring->stats.err_pkt_len++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EFAULT; - } - - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { - netdev_err(netdev, "L2 error pkt\n"); - u64_stats_update_begin(&ring->syncp); - ring->stats.l2_err++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EFAULT; - } - - u64_stats_update_begin(&ring->syncp); - ring->stats.rx_pkts++; - ring->stats.rx_bytes += skb->len; - u64_stats_update_end(&ring->syncp); - - ring->tqp_vector->rx_group.total_bytes += skb->len; - - hns3_rx_checksum(ring, skb, desc); - return 0; -} - -int hns3_clean_rx_ring( - struct hns3_enet_ring *ring, int budget, - void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) -{ -#define RCB_NOF_ALLOC_RX_BUFF_ONCE 
16 - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - int recv_pkts, recv_bds, clean_count, err; - int unused_count = hns3_desc_unused(ring); - struct sk_buff *skb = NULL; - int num, bnum = 0; - - num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); - rmb(); /* Make sure num taken effect before the other data is touched */ - - recv_pkts = 0, recv_bds = 0, clean_count = 0; - num -= unused_count; - - while (recv_pkts < budget && recv_bds < num) { - /* Reuse or realloc buffers */ - if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { - hns3_nic_alloc_rx_buffers(ring, - clean_count + unused_count); - clean_count = 0; - unused_count = hns3_desc_unused(ring); - } - - /* Poll one pkt */ - err = hns3_handle_rx_bd(ring, &skb, &bnum); - if (unlikely(!skb)) /* This fault cannot be repaired */ - goto out; - - recv_bds += bnum; - clean_count += bnum; - if (unlikely(err)) { /* Do jump the err */ - recv_pkts++; - continue; - } - - /* Do update ip stack process */ - skb->protocol = eth_type_trans(skb, netdev); - rx_fn(ring, skb); - - recv_pkts++; - } - -out: - /* Make all data has been write before submit */ - if (clean_count + unused_count > 0) - hns3_nic_alloc_rx_buffers(ring, - clean_count + unused_count); - - return recv_pkts; -} - -static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) -{ -#define HNS3_RX_ULTRA_PACKET_RATE 40000 - enum hns3_flow_level_range new_flow_level; - struct hns3_enet_tqp_vector *tqp_vector; - int packets_per_secs; - int bytes_per_usecs; - u16 new_int_gl; - int usecs; - - if (!ring_group->int_gl) - return false; - - if (ring_group->total_packets == 0) { - ring_group->int_gl = HNS3_INT_GL_50K; - ring_group->flow_level = HNS3_FLOW_LOW; - return true; - } - - /* Simple throttlerate management - * 0-10MB/s lower (50000 ints/s) - * 10-20MB/s middle (20000 ints/s) - * 20-1249MB/s high (18000 ints/s) - * > 40000pps ultra (8000 ints/s) - */ - new_flow_level = ring_group->flow_level; - new_int_gl = ring_group->int_gl; - tqp_vector = ring_group->ring->tqp_vector; - usecs = (ring_group->int_gl << 1); - bytes_per_usecs = ring_group->total_bytes / usecs; - /* 1000000 microseconds */ - packets_per_secs = ring_group->total_packets * 1000000 / usecs; - - switch (new_flow_level) { - case HNS3_FLOW_LOW: - if (bytes_per_usecs > 10) - new_flow_level = HNS3_FLOW_MID; - break; - case HNS3_FLOW_MID: - if (bytes_per_usecs > 20) - new_flow_level = HNS3_FLOW_HIGH; - else if (bytes_per_usecs <= 10) - new_flow_level = HNS3_FLOW_LOW; - break; - case HNS3_FLOW_HIGH: - case HNS3_FLOW_ULTRA: - default: - if (bytes_per_usecs <= 20) - new_flow_level = HNS3_FLOW_MID; - break; - } - - if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) && - (&tqp_vector->rx_group == ring_group)) - new_flow_level = HNS3_FLOW_ULTRA; - - switch (new_flow_level) { - case HNS3_FLOW_LOW: - new_int_gl = HNS3_INT_GL_50K; - break; - case HNS3_FLOW_MID: - new_int_gl = HNS3_INT_GL_20K; - break; - case HNS3_FLOW_HIGH: - new_int_gl = HNS3_INT_GL_18K; - break; - case HNS3_FLOW_ULTRA: - new_int_gl = HNS3_INT_GL_8K; - break; - default: - break; - } - - ring_group->total_bytes = 0; - ring_group->total_packets = 0; - ring_group->flow_level = new_flow_level; - if (new_int_gl != ring_group->int_gl) { - ring_group->int_gl = new_int_gl; - return true; - } - return false; -} - -static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) -{ - u16 rx_int_gl, tx_int_gl; - bool rx, tx; - - rx = hns3_get_new_int_gl(&tqp_vector->rx_group); - tx = 
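/* hns3_get_new_int_gl() above is a small hysteresis machine driven by
 * bytes_per_usecs; its level transitions reduce to the following
 * sketch (thresholds are the MB/s figures from the throttle table):
 *
 *	switch (level) {
 *	case LOW:  if (Bpus > 10) level = MID;        break;
 *	case MID:  if (Bpus > 20) level = HIGH;
 *		   else if (Bpus <= 10) level = LOW;  break;
 *	default:   if (Bpus <= 20) level = MID;       break;
 *	}
 *
 * The level then selects GL_50K/20K/18K/8K, and the rx group may jump
 * straight to ULTRA above 40000 pkts/s.
 */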
hns3_get_new_int_gl(&tqp_vector->tx_group); - rx_int_gl = tqp_vector->rx_group.int_gl; - tx_int_gl = tqp_vector->tx_group.int_gl; - if (rx && tx) { - if (rx_int_gl > tx_int_gl) { - tqp_vector->tx_group.int_gl = rx_int_gl; - tqp_vector->tx_group.flow_level = - tqp_vector->rx_group.flow_level; - hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl); - } else { - tqp_vector->rx_group.int_gl = tx_int_gl; - tqp_vector->rx_group.flow_level = - tqp_vector->tx_group.flow_level; - hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl); - } - } -} - -static int hns3_nic_common_poll(struct napi_struct *napi, int budget) -{ - struct hns3_enet_ring *ring; - int rx_pkt_total = 0; - - struct hns3_enet_tqp_vector *tqp_vector = - container_of(napi, struct hns3_enet_tqp_vector, napi); - bool clean_complete = true; - int rx_budget; - - /* Since the actual Tx work is minimal, we can give the Tx a larger - * budget and be more aggressive about cleaning up the Tx descriptors. - */ - hns3_for_each_ring(ring, tqp_vector->tx_group) { - if (!hns3_clean_tx_ring(ring, budget)) - clean_complete = false; - } - - /* make sure rx ring budget not smaller than 1 */ - rx_budget = max(budget / tqp_vector->num_tqps, 1); - - hns3_for_each_ring(ring, tqp_vector->rx_group) { - int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, - hns3_rx_skb); - - if (rx_cleaned >= rx_budget) - clean_complete = false; - - rx_pkt_total += rx_cleaned; - } - - tqp_vector->rx_group.total_packets += rx_pkt_total; - - if (!clean_complete) - return budget; - - napi_complete(napi); - hns3_update_new_int_gl(tqp_vector); - hns3_mask_vector_irq(tqp_vector, 1); - - return rx_pkt_total; -} - -static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, - struct hnae3_ring_chain_node *head) -{ - struct pci_dev *pdev = tqp_vector->handle->pdev; - struct hnae3_ring_chain_node *cur_chain = head; - struct hnae3_ring_chain_node *chain; - struct hns3_enet_ring *tx_ring; - struct hns3_enet_ring *rx_ring; - - tx_ring = tqp_vector->tx_group.ring; - if (tx_ring) { - cur_chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - - cur_chain->next = NULL; - - while (tx_ring->next) { - tx_ring = tx_ring->next; - - chain = devm_kzalloc(&pdev->dev, sizeof(*chain), - GFP_KERNEL); - if (!chain) - return -ENOMEM; - - cur_chain->next = chain; - chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - - cur_chain = chain; - } - } - - rx_ring = tqp_vector->rx_group.ring; - if (!tx_ring && rx_ring) { - cur_chain->next = NULL; - cur_chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - - rx_ring = rx_ring->next; - } - - while (rx_ring) { - chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); - if (!chain) - return -ENOMEM; - - cur_chain->next = chain; - chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - cur_chain = chain; - - rx_ring = rx_ring->next; - } - - return 0; -} - -static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, - struct hnae3_ring_chain_node *head) -{ - struct pci_dev *pdev = tqp_vector->handle->pdev; - struct hnae3_ring_chain_node *chain_tmp, *chain; - - chain = head->next; - - while (chain) { - chain_tmp = chain->next; - devm_kfree(&pdev->dev, chain); - chain = chain_tmp; - } -} - -static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, - struct 
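/* In hns3_nic_common_poll() above, one NAPI budget is shared by every
 * rx ring on the vector and never drops below 1:
 *	budget = 64, num_tqps = 3   ->  rx_budget = max(64 / 3, 1) = 21
 *	budget = 64, num_tqps = 128 ->  rx_budget = 1
 * Tx rings are cleaned with the full budget since per-descriptor tx
 * completion work is much cheaper.
 */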
hns3_enet_ring *ring) -{ - ring->next = group->ring; - group->ring = ring; - - group->count++; -} - -static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) -{ - struct hnae3_ring_chain_node vector_ring_chain; - struct hnae3_handle *h = priv->ae_handle; - struct hns3_enet_tqp_vector *tqp_vector; - struct hnae3_vector_info *vector; - struct pci_dev *pdev = h->pdev; - u16 tqp_num = h->kinfo.num_tqps; - u16 vector_num; - int ret = 0; - u16 i; - - /* RSS size, cpu online and vector_num should be the same */ - /* Should consider 2p/4p later */ - vector_num = min_t(u16, num_online_cpus(), tqp_num); - vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), - GFP_KERNEL); - if (!vector) - return -ENOMEM; - - vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); - - priv->vector_num = vector_num; - priv->tqp_vector = (struct hns3_enet_tqp_vector *) - devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), - GFP_KERNEL); - if (!priv->tqp_vector) - return -ENOMEM; - - for (i = 0; i < tqp_num; i++) { - u16 vector_i = i % vector_num; - - tqp_vector = &priv->tqp_vector[vector_i]; - - hns3_add_ring_to_group(&tqp_vector->tx_group, - priv->ring_data[i].ring); - - hns3_add_ring_to_group(&tqp_vector->rx_group, - priv->ring_data[i + tqp_num].ring); - - tqp_vector->idx = vector_i; - tqp_vector->mask_addr = vector[vector_i].io_addr; - tqp_vector->vector_irq = vector[vector_i].vector; - tqp_vector->num_tqps++; - - priv->ring_data[i].ring->tqp_vector = tqp_vector; - priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; - } - - for (i = 0; i < vector_num; i++) { - tqp_vector = &priv->tqp_vector[i]; - - tqp_vector->rx_group.total_bytes = 0; - tqp_vector->rx_group.total_packets = 0; - tqp_vector->tx_group.total_bytes = 0; - tqp_vector->tx_group.total_packets = 0; - hns3_vector_gl_rl_init(tqp_vector); - tqp_vector->handle = h; - - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) - goto out; - - ret = h->ae_algo->ops->map_ring_to_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); - if (ret) - goto out; - - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); - - netif_napi_add(priv->netdev, &tqp_vector->napi, - hns3_nic_common_poll, NAPI_POLL_WEIGHT); - } - -out: - devm_kfree(&pdev->dev, vector); - return ret; -} - -static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) -{ - struct hnae3_ring_chain_node vector_ring_chain; - struct hnae3_handle *h = priv->ae_handle; - struct hns3_enet_tqp_vector *tqp_vector; - struct pci_dev *pdev = h->pdev; - int i, ret; - - for (i = 0; i < priv->vector_num; i++) { - tqp_vector = &priv->tqp_vector[i]; - - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) - return ret; - - ret = h->ae_algo->ops->unmap_ring_from_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); - if (ret) - return ret; - - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); - - if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { - (void)irq_set_affinity_hint( - priv->tqp_vector[i].vector_irq, - NULL); - free_irq(priv->tqp_vector[i].vector_irq, - &priv->tqp_vector[i]); - } - - priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; - - netif_napi_del(&priv->tqp_vector[i].napi); - } - - devm_kfree(&pdev->dev, priv->tqp_vector); - - return 0; -} - -static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, - int ring_type) -{ - struct hns3_nic_ring_data *ring_data = priv->ring_data; - int queue_num = priv->ae_handle->kinfo.num_tqps; - struct 
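/* hns3_nic_init_vector_data() above spreads tqps over vectors round
 * robin (vector_i = i % vector_num); with 8 tqps on 3 vectors:
 *	vector 0 <- tqp 0, 3, 6
 *	vector 1 <- tqp 1, 4, 7
 *	vector 2 <- tqp 2, 5
 * vector_num starts as min(num_online_cpus(), tqp_num) and is then
 * clamped to what ae_algo->ops->get_vector() actually provides.
 */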
pci_dev *pdev = priv->ae_handle->pdev; - struct hns3_enet_ring *ring; - - ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); - if (!ring) - return -ENOMEM; - - if (ring_type == HNAE3_RING_TYPE_TX) { - ring_data[q->tqp_index].ring = ring; - ring_data[q->tqp_index].queue_index = q->tqp_index; - ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; - } else { - ring_data[q->tqp_index + queue_num].ring = ring; - ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; - ring->io_base = q->io_base; - } - - hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); - - ring->tqp = q; - ring->desc = NULL; - ring->desc_cb = NULL; - ring->dev = priv->dev; - ring->desc_dma_addr = 0; - ring->buf_size = q->buf_size; - ring->desc_num = q->desc_num; - ring->next_to_use = 0; - ring->next_to_clean = 0; - - return 0; -} - -static int hns3_queue_to_ring(struct hnae3_queue *tqp, - struct hns3_nic_priv *priv) -{ - int ret; - - ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); - if (ret) - return ret; - - ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); - if (ret) - return ret; - - return 0; -} - -static int hns3_get_ring_config(struct hns3_nic_priv *priv) -{ - struct hnae3_handle *h = priv->ae_handle; - struct pci_dev *pdev = h->pdev; - int i, ret; - - priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps * - sizeof(*priv->ring_data) * 2, - GFP_KERNEL); - if (!priv->ring_data) - return -ENOMEM; - - for (i = 0; i < h->kinfo.num_tqps; i++) { - ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); - if (ret) - goto err; - } - - return 0; -err: - devm_kfree(&pdev->dev, priv->ring_data); - return ret; -} - -static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) -{ - int ret; - - if (ring->desc_num <= 0 || ring->buf_size <= 0) - return -EINVAL; - - ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), - GFP_KERNEL); - if (!ring->desc_cb) { - ret = -ENOMEM; - goto out; - } - - ret = hns3_alloc_desc(ring); - if (ret) - goto out_with_desc_cb; - - if (!HNAE3_IS_TX_RING(ring)) { - ret = hns3_alloc_ring_buffers(ring); - if (ret) - goto out_with_desc; - } - - return 0; - -out_with_desc: - hns3_free_desc(ring); -out_with_desc_cb: - kfree(ring->desc_cb); - ring->desc_cb = NULL; -out: - return ret; -} - -static void hns3_fini_ring(struct hns3_enet_ring *ring) -{ - hns3_free_desc(ring); - kfree(ring->desc_cb); - ring->desc_cb = NULL; - ring->next_to_clean = 0; - ring->next_to_use = 0; -} - -static int hns3_buf_size2type(u32 buf_size) -{ - int bd_size_type; - - switch (buf_size) { - case 512: - bd_size_type = HNS3_BD_SIZE_512_TYPE; - break; - case 1024: - bd_size_type = HNS3_BD_SIZE_1024_TYPE; - break; - case 2048: - bd_size_type = HNS3_BD_SIZE_2048_TYPE; - break; - case 4096: - bd_size_type = HNS3_BD_SIZE_4096_TYPE; - break; - default: - bd_size_type = HNS3_BD_SIZE_2048_TYPE; - } - - return bd_size_type; -} - -static void hns3_init_ring_hw(struct hns3_enet_ring *ring) -{ - dma_addr_t dma = ring->desc_dma_addr; - struct hnae3_queue *q = ring->tqp; - - if (!HNAE3_IS_TX_RING(ring)) { - hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, - (u32)dma); - hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); - - hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, - hns3_buf_size2type(ring->buf_size)); - hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, - ring->desc_num / 8 - 1); - - } else { - hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, - (u32)dma); - hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); - - 
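/* On the BASEADDR_H writes above: the high word is produced as
 * (u32)((dma >> 31) >> 1) rather than dma >> 32, presumably because
 * dma_addr_t can be 32 bits wide and a shift by the full width of the
 * type is undefined in C; two shifts totalling 32 are always defined.
 * Example: dma = 0x0000001234567800ULL
 *	L register <- 0x34567800,  H register <- 0x00000012
 */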
hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG, - hns3_buf_size2type(ring->buf_size)); - hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, - ring->desc_num / 8 - 1); - } -} - -int hns3_init_all_ring(struct hns3_nic_priv *priv) -{ - struct hnae3_handle *h = priv->ae_handle; - int ring_num = h->kinfo.num_tqps * 2; - int i, j; - int ret; - - for (i = 0; i < ring_num; i++) { - ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); - if (ret) { - dev_err(priv->dev, - "Alloc ring memory fail! ret=%d\n", ret); - goto out_when_alloc_ring_memory; - } - - hns3_init_ring_hw(priv->ring_data[i].ring); - - u64_stats_init(&priv->ring_data[i].ring->syncp); - } - - return 0; - -out_when_alloc_ring_memory: - for (j = i - 1; j >= 0; j--) - hns3_fini_ring(priv->ring_data[j].ring); - - return -ENOMEM; -} - -int hns3_uninit_all_ring(struct hns3_nic_priv *priv) -{ - struct hnae3_handle *h = priv->ae_handle; - int i; - - for (i = 0; i < h->kinfo.num_tqps; i++) { - if (h->ae_algo->ops->reset_queue) - h->ae_algo->ops->reset_queue(h, i); - - hns3_fini_ring(priv->ring_data[i].ring); - hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); - } - - return 0; -} - -/* Set mac addr if it is configured. or leave it to the AE driver */ -static void hns3_init_mac_addr(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; - u8 mac_addr_temp[ETH_ALEN]; - - if (h->ae_algo->ops->get_mac_addr) { - h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); - ether_addr_copy(netdev->dev_addr, mac_addr_temp); - } - - /* Check if the MAC address is valid, if not get a random one */ - if (!is_valid_ether_addr(netdev->dev_addr)) { - eth_hw_addr_random(netdev); - dev_warn(priv->dev, "using random MAC address %pM\n", - netdev->dev_addr); - } - - if (h->ae_algo->ops->set_mac_addr) - h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); - -} - -static void hns3_nic_set_priv_ops(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - - if ((netdev->features & NETIF_F_TSO) || - (netdev->features & NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } -} - -static int hns3_client_init(struct hnae3_handle *handle) -{ - struct pci_dev *pdev = handle->pdev; - struct hns3_nic_priv *priv; - struct net_device *netdev; - int ret; - - netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), - handle->kinfo.num_tqps); - if (!netdev) - return -ENOMEM; - - priv = netdev_priv(netdev); - priv->dev = &pdev->dev; - priv->netdev = netdev; - priv->ae_handle = handle; - priv->last_reset_time = jiffies; - priv->reset_level = HNAE3_FUNC_RESET; - priv->tx_timeout_count = 0; - - handle->kinfo.netdev = netdev; - handle->priv = (void *)priv; - - hns3_init_mac_addr(netdev); - - hns3_set_default_feature(netdev); - - netdev->watchdog_timeo = HNS3_TX_TIMEOUT; - netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->netdev_ops = &hns3_nic_netdev_ops; - SET_NETDEV_DEV(netdev, &pdev->dev); - hns3_ethtool_set_ops(netdev); - hns3_nic_set_priv_ops(netdev); - - /* Carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - - ret = hns3_get_ring_config(priv); - if (ret) { - ret = -ENOMEM; - goto out_get_ring_cfg; - } - - ret = hns3_nic_init_vector_data(priv); - if (ret) { - ret = -ENOMEM; - goto out_init_vector_data; - } - - ret = hns3_init_all_ring(priv); - if (ret) { - ret = -ENOMEM; - goto 
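/* hns3_init_all_ring() above uses the standard partial-construction
 * rollback: a failure at index i tears down indices i-1..0 with
 * hns3_fini_ring() in reverse order, so a half-built ring array is
 * never left behind. hns3_client_init() here applies the same idea at
 * a coarser grain with its goto ladder.
 */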
out_init_ring_data; - } - - ret = register_netdev(netdev); - if (ret) { - dev_err(priv->dev, "probe register netdev fail!\n"); - goto out_reg_netdev_fail; - } - - hns3_dcbnl_setup(handle); - - /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ - netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); - - return ret; - -out_reg_netdev_fail: -out_init_ring_data: - (void)hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; -out_init_vector_data: -out_get_ring_cfg: - priv->ae_handle = NULL; - free_netdev(netdev); - return ret; -} - -static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) -{ - struct net_device *netdev = handle->kinfo.netdev; - struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; - - if (netdev->reg_state != NETREG_UNINITIALIZED) - unregister_netdev(netdev); - - ret = hns3_nic_uninit_vector_data(priv); - if (ret) - netdev_err(netdev, "uninit vector error\n"); - - ret = hns3_uninit_all_ring(priv); - if (ret) - netdev_err(netdev, "uninit ring error\n"); - - priv->ring_data = NULL; - - free_netdev(netdev); -} - -static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) -{ - struct net_device *netdev = handle->kinfo.netdev; - - if (!netdev) - return; - - if (linkup) { - netif_carrier_on(netdev); - netif_tx_wake_all_queues(netdev); - netdev_info(netdev, "link up\n"); - } else { - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - netdev_info(netdev, "link down\n"); - } -} - -static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct net_device *ndev = kinfo->netdev; - bool if_running; - int ret; - u8 i; - - if (tc > HNAE3_MAX_TC) - return -EINVAL; - - if (!ndev) - return -ENODEV; - - if_running = netif_running(ndev); - - ret = netdev_set_num_tc(ndev, tc); - if (ret) - return ret; - - if (if_running) { - (void)hns3_nic_net_stop(ndev); - msleep(100); - } - - ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? 
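/* On the max_mtu assignment in hns3_client_init above: HNS3_MAX_MTU is
 * 9728 (hns3_enet.h below), so the advertised maximum is
 *	9728 - (ETH_HLEN 14 + ETH_FCS_LEN 4 + VLAN_HLEN 4) = 9706
 * which matches the "(ETH_MIN_MTU(kernel default) - 9706)" comment.
 */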
- kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; - if (ret) - goto err_out; - - if (tc <= 1) { - netdev_reset_tc(ndev); - goto out; - } - - for (i = 0; i < HNAE3_MAX_TC; i++) { - struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; - - if (tc_info->enable) - netdev_set_tc_queue(ndev, - tc_info->tc, - tc_info->tqp_count, - tc_info->tqp_offset); - } - - for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { - netdev_set_prio_tc_map(ndev, i, - kinfo->prio_tc[i]); - } - -out: - ret = hns3_nic_set_real_num_queue(ndev); - -err_out: - if (if_running) - (void)hns3_nic_net_open(ndev); - - return ret; -} - -static void hns3_recover_hw_addr(struct net_device *ndev) -{ - struct netdev_hw_addr_list *list; - struct netdev_hw_addr *ha, *tmp; - - /* go through and sync uc_addr entries to the device */ - list = &ndev->uc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - hns3_nic_uc_sync(ndev, ha->addr); - - /* go through and sync mc_addr entries to the device */ - list = &ndev->mc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - hns3_nic_mc_sync(ndev, ha->addr); -} - -static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb) -{ - dev_kfree_skb_any(skb); -} - -static void hns3_clear_all_ring(struct hnae3_handle *h) -{ - struct net_device *ndev = h->kinfo.netdev; - struct hns3_nic_priv *priv = netdev_priv(ndev); - u32 i; - - for (i = 0; i < h->kinfo.num_tqps; i++) { - struct netdev_queue *dev_queue; - struct hns3_enet_ring *ring; - - ring = priv->ring_data[i].ring; - hns3_clean_tx_ring(ring, ring->desc_num); - dev_queue = netdev_get_tx_queue(ndev, - priv->ring_data[i].queue_index); - netdev_tx_reset_queue(dev_queue); - - ring = priv->ring_data[i + h->kinfo.num_tqps].ring; - hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data); - } -} - -static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct net_device *ndev = kinfo->netdev; - - if (!netif_running(ndev)) - return -EIO; - - return hns3_nic_net_stop(ndev); -} - -static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); - int ret = 0; - - if (netif_running(kinfo->netdev)) { - ret = hns3_nic_net_up(kinfo->netdev); - if (ret) { - netdev_err(kinfo->netdev, - "hns net up fail, ret=%d!\n", ret); - return ret; - } - - priv->last_reset_time = jiffies; - } - - return ret; -} - -static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) -{ - struct net_device *netdev = handle->kinfo.netdev; - struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; - - priv->reset_level = 1; - hns3_init_mac_addr(netdev); - hns3_nic_set_rx_mode(netdev); - hns3_recover_hw_addr(netdev); - - /* Carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - - ret = hns3_get_ring_config(priv); - if (ret) - return ret; - - ret = hns3_nic_init_vector_data(priv); - if (ret) - return ret; - - ret = hns3_init_all_ring(priv); - if (ret) { - hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; - } - - return ret; -} - -static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) -{ - struct net_device *netdev = handle->kinfo.netdev; - struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; - - hns3_clear_all_ring(handle); - - ret = hns3_nic_uninit_vector_data(priv); - if (ret) { - netdev_err(netdev, "uninit vector error\n"); - return ret; - } - - ret = hns3_uninit_all_ring(priv); - if 
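/* The handlers above and below are dispatched by hns3_reset_notify();
 * a full reset is expected to run DOWN -> UNINIT -> (hardware reset)
 * -> INIT -> UP, so rings and vectors are released before the reset
 * and rebuilt before traffic resumes.
 */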
(ret)
-		netdev_err(netdev, "uninit ring error\n");
-
-	priv->ring_data = NULL;
-
-	return ret;
-}
-
-static int hns3_reset_notify(struct hnae3_handle *handle,
-			     enum hnae3_reset_notify_type type)
-{
-	int ret = 0;
-
-	switch (type) {
-	case HNAE3_UP_CLIENT:
-		ret = hns3_reset_notify_up_enet(handle);
-		break;
-	case HNAE3_DOWN_CLIENT:
-		ret = hns3_reset_notify_down_enet(handle);
-		break;
-	case HNAE3_INIT_CLIENT:
-		ret = hns3_reset_notify_init_enet(handle);
-		break;
-	case HNAE3_UNINIT_CLIENT:
-		ret = hns3_reset_notify_uninit_enet(handle);
-		break;
-	default:
-		break;
-	}
-
-	return ret;
-}
-
-static const struct hnae3_client_ops client_ops = {
-	.init_instance = hns3_client_init,
-	.uninit_instance = hns3_client_uninit,
-	.link_status_change = hns3_link_status_change,
-	.setup_tc = hns3_client_setup_tc,
-	.reset_notify = hns3_reset_notify,
-};
-
-/* hns3_init_module - Driver registration routine
- * hns3_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- */
-static int __init hns3_init_module(void)
-{
-	int ret;
-
-	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
-	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
-
-	client.type = HNAE3_CLIENT_KNIC;
-	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
-		 hns3_driver_name);
-
-	client.ops = &client_ops;
-
-	ret = hnae3_register_client(&client);
-	if (ret)
-		return ret;
-
-	ret = pci_register_driver(&hns3_driver);
-	if (ret)
-		hnae3_unregister_client(&client);
-
-	return ret;
-}
-module_init(hns3_init_module);
-
-/* hns3_exit_module - Driver exit cleanup routine
- * hns3_exit_module is called just before the driver is removed
- * from memory.
- */
-static void __exit hns3_exit_module(void)
-{
-	pci_unregister_driver(&hns3_driver);
-	hnae3_unregister_client(&client);
-}
-module_exit(hns3_exit_module);
-
-MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
-MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("pci:hns-nic");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
deleted file mode 100644
index 8a9de759957b..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */ - -#ifndef __HNS3_ENET_H -#define __HNS3_ENET_H - -#include "hnae3.h" - -extern const char hns3_driver_version[]; - -enum hns3_nic_state { - HNS3_NIC_STATE_TESTING, - HNS3_NIC_STATE_RESETTING, - HNS3_NIC_STATE_REINITING, - HNS3_NIC_STATE_DOWN, - HNS3_NIC_STATE_DISABLED, - HNS3_NIC_STATE_REMOVING, - HNS3_NIC_STATE_SERVICE_INITED, - HNS3_NIC_STATE_SERVICE_SCHED, - HNS3_NIC_STATE2_RESET_REQUESTED, - HNS3_NIC_STATE_MAX -}; - -#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000 -#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004 -#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008 -#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C -#define HNS3_RING_RX_RING_TAIL_REG 0x00018 -#define HNS3_RING_RX_RING_HEAD_REG 0x0001C -#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020 -#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C - -#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040 -#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044 -#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048 -#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C -#define HNS3_RING_TX_RING_TAIL_REG 0x00058 -#define HNS3_RING_TX_RING_HEAD_REG 0x0005C -#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 -#define HNS3_RING_TX_RING_OFFSET_REG 0x00064 -#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C - -#define HNS3_RING_PREFETCH_EN_REG 0x0007C -#define HNS3_RING_CFG_VF_NUM_REG 0x00080 -#define HNS3_RING_ASID_REG 0x0008C -#define HNS3_RING_RX_VM_REG 0x00090 -#define HNS3_RING_T0_BE_RST 0x00094 -#define HNS3_RING_COULD_BE_RST 0x00098 -#define HNS3_RING_WRR_WEIGHT_REG 0x0009c - -#define HNS3_RING_INTMSK_RXWL_REG 0x000A0 -#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4 -#define HNS3_RX_RING_INT_STS_REG 0x000A8 -#define HNS3_RING_INTMSK_TXWL_REG 0x000AC -#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0 -#define HNS3_TX_RING_INT_STS_REG 0x000B4 -#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8 -#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC -#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4 -#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8 - -#define HNS3_RING_MB_CTRL_REG 0x00100 -#define HNS3_RING_MB_DATA_BASE_REG 0x00200 - -#define HNS3_TX_REG_OFFSET 0x40 - -#define HNS3_RX_HEAD_SIZE 256 - -#define HNS3_TX_TIMEOUT (5 * HZ) -#define HNS3_RING_NAME_LEN 16 -#define HNS3_BUFFER_SIZE_2048 2048 -#define HNS3_RING_MAX_PENDING 32768 -#define HNS3_RING_MIN_PENDING 8 -#define HNS3_RING_BD_MULTIPLE 8 -#define HNS3_MAX_MTU 9728 - -#define HNS3_BD_SIZE_512_TYPE 0 -#define HNS3_BD_SIZE_1024_TYPE 1 -#define HNS3_BD_SIZE_2048_TYPE 2 -#define HNS3_BD_SIZE_4096_TYPE 3 - -#define HNS3_RX_FLAG_VLAN_PRESENT 0x1 -#define HNS3_RX_FLAG_L3ID_IPV4 0x0 -#define HNS3_RX_FLAG_L3ID_IPV6 0x1 -#define HNS3_RX_FLAG_L4ID_UDP 0x0 -#define HNS3_RX_FLAG_L4ID_TCP 0x1 - -#define HNS3_RXD_DMAC_S 0 -#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S) -#define HNS3_RXD_VLAN_S 2 -#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S) -#define HNS3_RXD_L3ID_S 4 -#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S) -#define HNS3_RXD_L4ID_S 8 -#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) -#define HNS3_RXD_FRAG_B 12 -#define HNS3_RXD_L2E_B 16 -#define HNS3_RXD_L3E_B 17 -#define HNS3_RXD_L4E_B 18 -#define HNS3_RXD_TRUNCAT_B 19 -#define HNS3_RXD_HOI_B 20 -#define HNS3_RXD_DOI_B 21 -#define HNS3_RXD_OL3E_B 22 -#define HNS3_RXD_OL4E_B 23 - -#define HNS3_RXD_ODMAC_S 0 -#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) -#define HNS3_RXD_OVLAN_S 2 -#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S) -#define HNS3_RXD_OL3ID_S 4 -#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S) -#define HNS3_RXD_OL4ID_S 8 -#define HNS3_RXD_OL4ID_M (0xf << 
HNS3_RXD_OL4ID_S) -#define HNS3_RXD_FBHI_S 12 -#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S) -#define HNS3_RXD_FBLI_S 14 -#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S) - -#define HNS3_RXD_BDTYPE_S 0 -#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S) -#define HNS3_RXD_VLD_B 4 -#define HNS3_RXD_UDP0_B 5 -#define HNS3_RXD_EXTEND_B 7 -#define HNS3_RXD_FE_B 8 -#define HNS3_RXD_LUM_B 9 -#define HNS3_RXD_CRCP_B 10 -#define HNS3_RXD_L3L4P_B 11 -#define HNS3_RXD_TSIND_S 12 -#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) -#define HNS3_RXD_LKBK_B 15 -#define HNS3_RXD_HDL_S 16 -#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S) -#define HNS3_RXD_HSIND_B 31 - -#define HNS3_TXD_L3T_S 0 -#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) -#define HNS3_TXD_L4T_S 2 -#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S) -#define HNS3_TXD_L3CS_B 4 -#define HNS3_TXD_L4CS_B 5 -#define HNS3_TXD_VLAN_B 6 -#define HNS3_TXD_TSO_B 7 - -#define HNS3_TXD_L2LEN_S 8 -#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S) -#define HNS3_TXD_L3LEN_S 16 -#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S) -#define HNS3_TXD_L4LEN_S 24 -#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S) - -#define HNS3_TXD_OL3T_S 0 -#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S) -#define HNS3_TXD_OVLAN_B 2 -#define HNS3_TXD_MACSEC_B 3 -#define HNS3_TXD_TUNTYPE_S 4 -#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S) - -#define HNS3_TXD_BDTYPE_S 0 -#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S) -#define HNS3_TXD_FE_B 4 -#define HNS3_TXD_SC_S 5 -#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S) -#define HNS3_TXD_EXTEND_B 7 -#define HNS3_TXD_VLD_B 8 -#define HNS3_TXD_RI_B 9 -#define HNS3_TXD_RA_B 10 -#define HNS3_TXD_TSYN_B 11 -#define HNS3_TXD_DECTTL_S 12 -#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) - -#define HNS3_TXD_MSS_S 0 -#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) - -#define HNS3_VECTOR_TX_IRQ BIT_ULL(0) -#define HNS3_VECTOR_RX_IRQ BIT_ULL(1) - -#define HNS3_VECTOR_NOT_INITED 0 -#define HNS3_VECTOR_INITED 1 - -#define HNS3_MAX_BD_SIZE 65535 -#define HNS3_MAX_BD_PER_FRAG 8 -#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS - -#define HNS3_VECTOR_GL0_OFFSET 0x100 -#define HNS3_VECTOR_GL1_OFFSET 0x200 -#define HNS3_VECTOR_GL2_OFFSET 0x300 -#define HNS3_VECTOR_RL_OFFSET 0x900 -#define HNS3_VECTOR_RL_EN_B 6 - -enum hns3_pkt_l3t_type { - HNS3_L3T_NONE, - HNS3_L3T_IPV6, - HNS3_L3T_IPV4, - HNS3_L3T_RESERVED -}; - -enum hns3_pkt_l4t_type { - HNS3_L4T_UNKNOWN, - HNS3_L4T_TCP, - HNS3_L4T_UDP, - HNS3_L4T_SCTP -}; - -enum hns3_pkt_ol3t_type { - HNS3_OL3T_NONE, - HNS3_OL3T_IPV6, - HNS3_OL3T_IPV4_NO_CSUM, - HNS3_OL3T_IPV4_CSUM -}; - -enum hns3_pkt_tun_type { - HNS3_TUN_NONE, - HNS3_TUN_MAC_IN_UDP, - HNS3_TUN_NVGRE, - HNS3_TUN_OTHER -}; - -/* hardware spec ring buffer format */ -struct __packed hns3_desc { - __le64 addr; - union { - struct { - __le16 vlan_tag; - __le16 send_size; - union { - __le32 type_cs_vlan_tso_len; - struct { - __u8 type_cs_vlan_tso; - __u8 l2_len; - __u8 l3_len; - __u8 l4_len; - }; - }; - __le16 outer_vlan_tag; - __le16 tv; - - union { - __le32 ol_type_vlan_len_msec; - struct { - __u8 ol_type_vlan_msec; - __u8 ol2_len; - __u8 ol3_len; - __u8 ol4_len; - }; - }; - - __le32 paylen; - __le16 bdtp_fe_sc_vld_ra_ri; - __le16 mss; - } tx; - - struct { - __le32 l234_info; - __le16 pkt_len; - __le16 size; - - __le32 rss_hash; - __le16 fd_id; - __le16 vlan_tag; - - union { - __le32 ol_info; - struct { - __le16 o_dm_vlan_id_fb; - __le16 ot_vlan_tag; - }; - }; - - __le32 bd_base_info; - } rx; - }; -}; - -struct hns3_desc_cb { - 
dma_addr_t dma; /* dma address of this desc */ - void *buf; /* cpu addr for a desc */ - - /* priv data for the desc, e.g. skb when use with ip stack*/ - void *priv; - u16 page_offset; - u16 reuse_flag; - - u16 length; /* length of the buffer */ - - /* desc type, used by the ring user to mark the type of the priv data */ - u16 type; -}; - -enum hns3_pkt_l3type { - HNS3_L3_TYPE_IPV4, - HNS3_L3_TYPE_IPV6, - HNS3_L3_TYPE_ARP, - HNS3_L3_TYPE_RARP, - HNS3_L3_TYPE_IPV4_OPT, - HNS3_L3_TYPE_IPV6_EXT, - HNS3_L3_TYPE_LLDP, - HNS3_L3_TYPE_BPDU, - HNS3_L3_TYPE_MAC_PAUSE, - HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ - - /* reserved for 0xA~0xB*/ - - HNS3_L3_TYPE_CNM = 0xc, - - /* reserved for 0xD~0xE*/ - - HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */ -}; - -enum hns3_pkt_l4type { - HNS3_L4_TYPE_UDP, - HNS3_L4_TYPE_TCP, - HNS3_L4_TYPE_GRE, - HNS3_L4_TYPE_SCTP, - HNS3_L4_TYPE_IGMP, - HNS3_L4_TYPE_ICMP, - - /* reserved for 0x6~0xE */ - - HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */ -}; - -enum hns3_pkt_ol3type { - HNS3_OL3_TYPE_IPV4 = 0, - HNS3_OL3_TYPE_IPV6, - /* reserved for 0x2~0x3 */ - HNS3_OL3_TYPE_IPV4_OPT = 4, - HNS3_OL3_TYPE_IPV6_EXT, - - /* reserved for 0x6~0xE*/ - - HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */ -}; - -enum hns3_pkt_ol4type { - HNS3_OL4_TYPE_NO_TUN, - HNS3_OL4_TYPE_MAC_IN_UDP, - HNS3_OL4_TYPE_NVGRE, - HNS3_OL4_TYPE_UNKNOWN -}; - -struct ring_stats { - u64 io_err_cnt; - u64 sw_err_cnt; - u64 seg_pkt_cnt; - union { - struct { - u64 tx_pkts; - u64 tx_bytes; - u64 tx_err_cnt; - u64 restart_queue; - u64 tx_busy; - }; - struct { - u64 rx_pkts; - u64 rx_bytes; - u64 rx_err_cnt; - u64 reuse_pg_cnt; - u64 err_pkt_len; - u64 non_vld_descs; - u64 err_bd_num; - u64 l2_err; - u64 l3l4_csum_err; - }; - }; -}; - -struct hns3_enet_ring { - u8 __iomem *io_base; /* base io address for the ring */ - struct hns3_desc *desc; /* dma map address space */ - struct hns3_desc_cb *desc_cb; - struct hns3_enet_ring *next; - struct hns3_enet_tqp_vector *tqp_vector; - struct hnae3_queue *tqp; - char ring_name[HNS3_RING_NAME_LEN]; - struct device *dev; /* will be used for DMA mapping of descriptors */ - - /* statistic */ - struct ring_stats stats; - struct u64_stats_sync syncp; - - dma_addr_t desc_dma_addr; - u32 buf_size; /* size for hnae_desc->addr, preset by AE */ - u16 desc_num; /* total number of desc */ - u16 max_desc_num_per_pkt; - u16 max_raw_data_sz_per_desc; - u16 max_pkt_size; - int next_to_use; /* idx of next spare desc */ - - /* idx of lastest sent desc, the ring is empty when equal to - * next_to_use - */ - int next_to_clean; - - u32 flag; /* ring attribute */ - int irq_init_flag; - - int numa_node; - cpumask_t affinity_mask; -}; - -struct hns_queue; - -struct hns3_nic_ring_data { - struct hns3_enet_ring *ring; - struct napi_struct napi; - int queue_index; - int (*poll_one)(struct hns3_nic_ring_data *, int, void *); - void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *); - void (*fini_process)(struct hns3_nic_ring_data *); -}; - -struct hns3_nic_ops { - int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type); - int (*maybe_stop_tx)(struct sk_buff **out_skb, - int *bnum, struct hns3_enet_ring *ring); - void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); -}; - -enum hns3_flow_level_range { - HNS3_FLOW_LOW = 0, - HNS3_FLOW_MID = 1, - HNS3_FLOW_HIGH = 2, - HNS3_FLOW_ULTRA = 3, -}; - -enum hns3_link_mode_bits { - HNS3_LM_FIBRE_BIT = BIT(0), - HNS3_LM_AUTONEG_BIT = BIT(1), - HNS3_LM_TP_BIT = BIT(2), - HNS3_LM_PAUSE_BIT = 
BIT(3), - HNS3_LM_BACKPLANE_BIT = BIT(4), - HNS3_LM_10BASET_HALF_BIT = BIT(5), - HNS3_LM_10BASET_FULL_BIT = BIT(6), - HNS3_LM_100BASET_HALF_BIT = BIT(7), - HNS3_LM_100BASET_FULL_BIT = BIT(8), - HNS3_LM_1000BASET_FULL_BIT = BIT(9), - HNS3_LM_10000BASEKR_FULL_BIT = BIT(10), - HNS3_LM_25000BASEKR_FULL_BIT = BIT(11), - HNS3_LM_40000BASELR4_FULL_BIT = BIT(12), - HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13), - HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14), - HNS3_LM_COUNT = 15 -}; - -#define HNS3_INT_GL_50K 0x000A -#define HNS3_INT_GL_20K 0x0019 -#define HNS3_INT_GL_18K 0x001B -#define HNS3_INT_GL_8K 0x003E - -struct hns3_enet_ring_group { - /* array of pointers to rings */ - struct hns3_enet_ring *ring; - u64 total_bytes; /* total bytes processed this group */ - u64 total_packets; /* total packets processed this group */ - u16 count; - enum hns3_flow_level_range flow_level; - u16 int_gl; -}; - -struct hns3_enet_tqp_vector { - struct hnae3_handle *handle; - u8 __iomem *mask_addr; - int vector_irq; - int irq_init_flag; - - u16 idx; /* index in the TQP vector array per handle. */ - - struct napi_struct napi; - - struct hns3_enet_ring_group rx_group; - struct hns3_enet_ring_group tx_group; - - u16 num_tqps; /* total number of tqps in TQP vector */ - - cpumask_t affinity_mask; - char name[HNAE3_INT_NAME_LEN]; - - /* when 0 should adjust interrupt coalesce parameter */ - u8 int_adapt_down; -} ____cacheline_internodealigned_in_smp; - -enum hns3_udp_tnl_type { - HNS3_UDP_TNL_VXLAN, - HNS3_UDP_TNL_GENEVE, - HNS3_UDP_TNL_MAX, -}; - -struct hns3_udp_tunnel { - u16 dst_port; - int used; -}; - -struct hns3_nic_priv { - struct hnae3_handle *ae_handle; - u32 enet_ver; - u32 port_id; - struct net_device *netdev; - struct device *dev; - struct hns3_nic_ops ops; - - /** - * the cb for nic to manage the ring buffer, the first half of the - * array is for tx_ring and vice versa for the second half - */ - struct hns3_nic_ring_data *ring_data; - struct hns3_enet_tqp_vector *tqp_vector; - u16 vector_num; - - /* The most recently read link state */ - int link; - u64 tx_timeout_count; - enum hnae3_reset_type reset_level; - unsigned long last_reset_time; - - unsigned long state; - - struct timer_list service_timer; - - struct work_struct service_task; - - struct notifier_block notifier_block; - /* Vxlan/Geneve information */ - struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; -}; - -union l3_hdr_info { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; -}; - -union l4_hdr_info { - struct tcphdr *tcp; - struct udphdr *udp; - unsigned char *hdr; -}; - -/* the distance between [begin, end) in a ring buffer - * note: there is a unuse slot between the begin and the end - */ -static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end) -{ - return (end - begin + ring->desc_num) % ring->desc_num; -} - -static inline int ring_space(struct hns3_enet_ring *ring) -{ - return ring->desc_num - - ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; -} - -static inline int is_ring_empty(struct hns3_enet_ring *ring) -{ - return ring->next_to_use == ring->next_to_clean; -} - -static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) -{ - u8 __iomem *reg_addr = READ_ONCE(base); - - writel(value, reg_addr + reg); -} - -#define hns3_write_dev(a, reg, value) \ - hns3_write_reg((a)->io_base, (reg), (value)) - -#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ - (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) - -#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) - -#define 
ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \ - DMA_TO_DEVICE : DMA_FROM_DEVICE) - -#define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) - -#define hnae_buf_size(_ring) ((_ring)->buf_size) -#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring))) -#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring)) - -/* iterator for handling rings in ring group */ -#define hns3_for_each_ring(pos, head) \ - for (pos = (head).ring; pos; pos = pos->next) - -#define hns3_get_handle(ndev) \ - (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle) - -void hns3_ethtool_set_ops(struct net_device *netdev); - -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); -int hns3_init_all_ring(struct hns3_nic_priv *priv); -int hns3_uninit_all_ring(struct hns3_nic_priv *priv); -netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); -int hns3_clean_rx_ring( - struct hns3_enet_ring *ring, int budget, - void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); - -#ifdef CONFIG_HNS3_DCB -void hns3_dcbnl_setup(struct hnae3_handle *handle); -#else -static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {} -#endif - -#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c deleted file mode 100644 index a21470c72da3..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ /dev/null @@ -1,876 +0,0 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include - -#include "hns3_enet.h" - -struct hns3_stats { - char stats_string[ETH_GSTRING_LEN]; - int stats_size; - int stats_offset; -}; - -/* tqp related stats */ -#define HNS3_TQP_STAT(_string, _member) { \ - .stats_string = _string, \ - .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ - .stats_offset = offsetof(struct hns3_enet_ring, stats), \ -} \ - -static const struct hns3_stats hns3_txq_stats[] = { - /* Tx per-queue statistics */ - HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt), - HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt), - HNS3_TQP_STAT("tx_pkts", tx_pkts), - HNS3_TQP_STAT("tx_bytes", tx_bytes), - HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt), - HNS3_TQP_STAT("tx_restart_queue", restart_queue), - HNS3_TQP_STAT("tx_busy", tx_busy), -}; - -#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) - -static const struct hns3_stats hns3_rxq_stats[] = { - /* Rx per-queue statistics */ - HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt), - HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt), - HNS3_TQP_STAT("rx_pkts", rx_pkts), - HNS3_TQP_STAT("rx_bytes", rx_bytes), - HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt), - HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt), - HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len), - HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs), - HNS3_TQP_STAT("rx_err_bd_num", err_bd_num), - HNS3_TQP_STAT("rx_l2_err", l2_err), - HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err), -}; - -#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) - -#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) - -#define HNS3_SELF_TEST_TPYE_NUM 1 -#define HNS3_NIC_LB_TEST_PKT_NUM 1 -#define HNS3_NIC_LB_TEST_RING_ID 0 
-#define HNS3_NIC_LB_TEST_PACKET_SIZE 128 - -/* Nic loopback test err */ -#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 -#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 -#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 - -struct hns3_link_mode_mapping { - u32 hns3_link_mode; - u32 ethtool_link_mode; -}; - -static const struct hns3_link_mode_mapping hns3_lm_map[] = { - {HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, - {HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, - {HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, - {HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, - {HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, - {HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT}, - {HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT}, - {HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT}, - {HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT}, - {HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, -}; - -static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - int ret; - - if (!h->ae_algo->ops->set_loopback || - !h->ae_algo->ops->set_promisc_mode) - return -EOPNOTSUPP; - - switch (loop) { - case HNAE3_MAC_INTER_LOOP_MAC: - ret = h->ae_algo->ops->set_loopback(h, loop, true); - break; - case HNAE3_MAC_LOOP_NONE: - ret = h->ae_algo->ops->set_loopback(h, - HNAE3_MAC_INTER_LOOP_MAC, false); - break; - default: - ret = -ENOTSUPP; - break; - } - - if (ret) - return ret; - - if (loop == HNAE3_MAC_LOOP_NONE) - h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC); - else - h->ae_algo->ops->set_promisc_mode(h, 1); - - return ret; -} - -static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - int ret; - - if (!h->ae_algo->ops->start) - return -EOPNOTSUPP; - - ret = h->ae_algo->ops->start(h); - if (ret) { - netdev_err(ndev, - "hns3_lb_up ae start return error: %d\n", ret); - return ret; - } - - ret = hns3_lp_setup(ndev, loop_mode); - usleep_range(10000, 20000); - - return ret; -} - -static int hns3_lp_down(struct net_device *ndev) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - int ret; - - if (!h->ae_algo->ops->stop) - return -EOPNOTSUPP; - - ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE); - if (ret) { - netdev_err(ndev, "lb_setup return error: %d\n", ret); - return ret; - } - - h->ae_algo->ops->stop(h); - usleep_range(10000, 20000); - - return 0; -} - -static void hns3_lp_setup_skb(struct sk_buff *skb) -{ - struct net_device *ndev = skb->dev; - unsigned char *packet; - struct ethhdr *ethh; - unsigned int i; - - skb_reserve(skb, NET_IP_ALIGN); - ethh = skb_put(skb, sizeof(struct ethhdr)); - packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); - - memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); - eth_zero_addr(ethh->h_source); - ethh->h_proto = htons(ETH_P_ARP); - skb_reset_mac_header(skb); - - for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++) - packet[i] = (unsigned char)(i & 0xff); -} - -static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, - struct sk_buff *skb) -{ - struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; - unsigned char *packet = skb->data; - u32 i; - - for (i = 0; i < skb->len; i++) - if (packet[i] != (unsigned char)(i & 0xff)) - break; - - /* The packet is correctly received */ - if (i == skb->len) - tqp_vector->rx_group.total_packets++; - else - print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, - skb->data, skb->len, true); - - dev_kfree_skb_any(skb); -} - 
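The two helpers above define the self-test payload contract: hns3_lp_setup_skb() fills byte i of the test packet with (i & 0xff), and hns3_lb_check_skb_data() counts a packet as received only if every byte still matches. A standalone sketch of that verification step (illustrative only; the function name is hypothetical and not part of the driver):

	static bool hns3_lb_payload_ok(const unsigned char *data, unsigned int len)
	{
		unsigned int i;

		/* mirrors the fill loop: byte i must equal (i & 0xff) */
		for (i = 0; i < len; i++)
			if (data[i] != (unsigned char)(i & 0xff))
				return false;

		return true;
	}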
-static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) -{ - struct hnae3_handle *h = priv->ae_handle; - struct hnae3_knic_private_info *kinfo; - u32 i, rcv_good_pkt_total = 0; - - kinfo = &h->kinfo; - for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) { - struct hns3_enet_ring *ring = priv->ring_data[i].ring; - struct hns3_enet_ring_group *rx_group; - u64 pre_rx_pkt; - - rx_group = &ring->tqp_vector->rx_group; - pre_rx_pkt = rx_group->total_packets; - - hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); - - rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); - rx_group->total_packets = pre_rx_pkt; - } - return rcv_good_pkt_total; -} - -static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, - u32 end_ringid, u32 budget) -{ - u32 i; - - for (i = start_ringid; i <= end_ringid; i++) { - struct hns3_enet_ring *ring = priv->ring_data[i].ring; - - hns3_clean_tx_ring(ring, budget); - } -} - -/** - * hns3_lp_run_test - run loopback test - * @ndev: net device - * @mode: loopback type - */ -static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode) -{ - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct sk_buff *skb; - u32 i, good_cnt; - int ret_val = 0; - - skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN, - GFP_KERNEL); - if (!skb) - return HNS3_NIC_LB_TEST_NO_MEM_ERR; - - skb->dev = ndev; - hns3_lp_setup_skb(skb); - skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID; - - good_cnt = 0; - for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) { - netdev_tx_t tx_ret; - - skb_get(skb); - tx_ret = hns3_nic_net_xmit(skb, ndev); - if (tx_ret == NETDEV_TX_OK) - good_cnt++; - else - netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n", - tx_ret); - } - if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { - ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR; - netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n", - mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); - goto out; - } - - /* Allow 200 milliseconds for packets to go from Tx to Rx */ - msleep(200); - - good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM); - if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { - ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR; - netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n", - mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); - } - -out: - hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID, - HNS3_NIC_LB_TEST_RING_ID, - HNS3_NIC_LB_TEST_PKT_NUM); - - kfree_skb(skb); - return ret_val; -} - -/** - * hns3_nic_self_test - self test - * @ndev: net device - * @eth_test: test cmd - * @data: test result - */ -static void hns3_self_test(struct net_device *ndev, - struct ethtool_test *eth_test, u64 *data) -{ - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - int st_param[HNS3_SELF_TEST_TPYE_NUM][2]; - bool if_running = netif_running(ndev); - int test_index = 0; - u32 i; - - /* Only do offline selftest, or pass by default */ - if (eth_test->flags != ETH_TEST_FL_OFFLINE) - return; - - st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC; - st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = - h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; - - if (if_running) - dev_close(ndev); - - set_bit(HNS3_NIC_STATE_TESTING, &priv->state); - - for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) { - enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; - - if (!st_param[i][1]) - continue; - - data[test_index] = hns3_lp_up(ndev, loop_type); - if (!data[test_index]) { - data[test_index] = hns3_lp_run_test(ndev, loop_type); - 
hns3_lp_down(ndev); - } - - if (data[test_index]) - eth_test->flags |= ETH_TEST_FL_FAILED; - - test_index++; - } - - clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); - - if (if_running) - dev_open(ndev); -} - -static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd, - bool is_advertised) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) { - if (!(caps & hns3_lm_map[i].hns3_link_mode)) - continue; - - if (is_advertised) - __set_bit(hns3_lm_map[i].ethtool_link_mode, - cmd->link_modes.advertising); - else - __set_bit(hns3_lm_map[i].ethtool_link_mode, - cmd->link_modes.supported); - } -} - -static int hns3_get_sset_count(struct net_device *netdev, int stringset) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - const struct hnae3_ae_ops *ops = h->ae_algo->ops; - - if (!ops->get_sset_count) - return -EOPNOTSUPP; - - switch (stringset) { - case ETH_SS_STATS: - return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + - ops->get_sset_count(h, stringset)); - - case ETH_SS_TEST: - return ops->get_sset_count(h, stringset); - } - - return 0; -} - -static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, - u32 stat_count, u32 num_tqps) -{ -#define MAX_PREFIX_SIZE (8 + 4) - u32 size_left; - u32 i, j; - u32 n1; - - for (i = 0; i < num_tqps; i++) { - for (j = 0; j < stat_count; j++) { - data[ETH_GSTRING_LEN - 1] = '\0'; - - /* first, prepend the prefix string */ - n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i); - n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); - size_left = (ETH_GSTRING_LEN - 1) - n1; - - /* now, concatenate the stats string to it */ - strncat(data, stats[j].stats_string, size_left); - data += ETH_GSTRING_LEN; - } - } - - return data; -} - -static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - - /* get strings for Tx */ - data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, - kinfo->num_tqps); - - /* get strings for Rx */ - data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, - kinfo->num_tqps); - - return data; -} - -static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - const struct hnae3_ae_ops *ops = h->ae_algo->ops; - char *buff = (char *)data; - - if (!ops->get_strings) - return; - - switch (stringset) { - case ETH_SS_STATS: - buff = hns3_get_strings_tqps(h, buff); - h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); - break; - case ETH_SS_TEST: - ops->get_strings(h, stringset, data); - break; - } -} - -static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) -{ - struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv; - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hns3_enet_ring *ring; - u8 *stat; - u32 i; - - /* get stats for Tx */ - for (i = 0; i < kinfo->num_tqps; i++) { - ring = nic_priv->ring_data[i].ring; - for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_txq_stats[i].stats_offset; - *data++ = *(u64 *)stat; - } - } - - /* get stats for Rx */ - for (i = 0; i < kinfo->num_tqps; i++) { - ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; - for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset; - *data++ = *(u64 *)stat; - } - } - - return data; -} - -/* hns3_get_stats - get detail statistics. - * @netdev: net device - * @stats: statistics info. - * @data: statistics data. 
- */ -static void hns3_get_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - u64 *p = data; - - if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { - netdev_err(netdev, "could not get any statistics\n"); - return; - } - - h->ae_algo->ops->update_stats(h, &netdev->stats); - - /* get per-queue stats */ - p = hns3_get_stats_tqps(h, p); - - /* get MAC & other misc hardware stats */ - h->ae_algo->ops->get_stats(h, p); -} - -static void hns3_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; - - strncpy(drvinfo->version, hns3_driver_version, - sizeof(drvinfo->version)); - drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; - - strncpy(drvinfo->driver, h->pdev->driver->name, - sizeof(drvinfo->driver)); - drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; - - strncpy(drvinfo->bus_info, pci_name(h->pdev), - sizeof(drvinfo->bus_info)); - drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; - - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", - priv->ae_handle->ae_algo->ops->get_fw_version(h)); -} - -static u32 hns3_get_link(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status) - return h->ae_algo->ops->get_status(h); - else - return 0; -} - -static void hns3_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *param) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; - int queue_num = h->kinfo.num_tqps; - - param->tx_max_pending = HNS3_RING_MAX_PENDING; - param->rx_max_pending = HNS3_RING_MAX_PENDING; - - param->tx_pending = priv->ring_data[0].ring->desc_num; - param->rx_pending = priv->ring_data[queue_num].ring->desc_num; -} - -static void hns3_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *param) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) - h->ae_algo->ops->get_pauseparam(h, ¶m->autoneg, - ¶m->rx_pause, ¶m->tx_pause); -} - -static int hns3_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - u32 supported_caps; - u32 advertised_caps; - u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; - u8 link_stat; - - if (!h->ae_algo || !h->ae_algo->ops) - return -EOPNOTSUPP; - - /* 1.auto_neg & speed & duplex from cmd */ - if (netdev->phydev) - phy_ethtool_ksettings_get(netdev->phydev, cmd); - else if (h->ae_algo->ops->get_ksettings_an_result) - h->ae_algo->ops->get_ksettings_an_result(h, - &cmd->base.autoneg, - &cmd->base.speed, - &cmd->base.duplex); - else - return -EOPNOTSUPP; - - link_stat = hns3_get_link(netdev); - if (!link_stat) { - cmd->base.speed = SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; - } - - /* 2.media_type get from bios parameter block */ - if (h->ae_algo->ops->get_media_type) { - h->ae_algo->ops->get_media_type(h, &media_type); - - switch (media_type) { - case HNAE3_MEDIA_TYPE_FIBER: - cmd->base.port = PORT_FIBRE; - supported_caps = HNS3_LM_FIBRE_BIT | - HNS3_LM_AUTONEG_BIT | - HNS3_LM_PAUSE_BIT | - HNS3_LM_1000BASET_FULL_BIT; - - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_COPPER: - cmd->base.port = PORT_TP; - supported_caps = HNS3_LM_TP_BIT | - HNS3_LM_AUTONEG_BIT | - 
HNS3_LM_PAUSE_BIT | - HNS3_LM_1000BASET_FULL_BIT | - HNS3_LM_100BASET_FULL_BIT | - HNS3_LM_100BASET_HALF_BIT | - HNS3_LM_10BASET_FULL_BIT | - HNS3_LM_10BASET_HALF_BIT; - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_BACKPLANE: - cmd->base.port = PORT_NONE; - supported_caps = HNS3_LM_BACKPLANE_BIT | - HNS3_LM_PAUSE_BIT | - HNS3_LM_AUTONEG_BIT | - HNS3_LM_1000BASET_FULL_BIT | - HNS3_LM_100BASET_FULL_BIT | - HNS3_LM_100BASET_HALF_BIT | - HNS3_LM_10BASET_FULL_BIT | - HNS3_LM_10BASET_HALF_BIT; - - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_UNKNOWN: - default: - cmd->base.port = PORT_OTHER; - supported_caps = 0; - advertised_caps = 0; - break; - } - - if (!cmd->base.autoneg) - advertised_caps &= ~HNS3_LM_AUTONEG_BIT; - - /* now, map driver link modes to ethtool link modes */ - hns3_driv_to_eth_caps(supported_caps, cmd, false); - hns3_driv_to_eth_caps(advertised_caps, cmd, true); - } - - /* 3.mdix_ctrl&mdix get from phy reg */ - if (h->ae_algo->ops->get_mdix_mode) - h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, - &cmd->base.eth_tp_mdix); - /* 4.mdio_support */ - cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; - - return 0; -} - -static int hns3_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) -{ - /* Only support ksettings_set for netdev with phy attached for now */ - if (netdev->phydev) - return phy_ethtool_ksettings_set(netdev->phydev, cmd); - - return -EOPNOTSUPP; -} - -static u32 hns3_get_rss_key_size(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || - !h->ae_algo->ops->get_rss_key_size) - return -EOPNOTSUPP; - - return h->ae_algo->ops->get_rss_key_size(h); -} - -static u32 hns3_get_rss_indir_size(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || - !h->ae_algo->ops->get_rss_indir_size) - return -EOPNOTSUPP; - - return h->ae_algo->ops->get_rss_indir_size(h); -} - -static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) - return -EOPNOTSUPP; - - return h->ae_algo->ops->get_rss(h, indir, key, hfunc); -} - -static int hns3_set_rss(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) - return -EOPNOTSUPP; - - /* currently we only support Toeplitz hash */ - if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { - netdev_err(netdev, - "hash func not supported (only Toeplitz hash)\n"); - return -EOPNOTSUPP; - } - if (!indir) { - netdev_err(netdev, - "set rss failed for indir is empty\n"); - return -EOPNOTSUPP; - } - - return h->ae_algo->ops->set_rss(h, indir, key, hfunc); -} - -static int hns3_get_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *cmd, - u32 *rule_locs) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple) - return -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = h->kinfo.num_tc * h->kinfo.rss_size; - break; - case ETHTOOL_GRXFH: - return h->ae_algo->ops->get_rss_tuple(h, cmd); - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, - u32 
new_desc_num) -{ - struct hnae3_handle *h = priv->ae_handle; - int i; - - h->kinfo.num_desc = new_desc_num; - - for (i = 0; i < h->kinfo.num_tqps * 2; i++) - priv->ring_data[i].ring->desc_num = new_desc_num; - - return hns3_init_all_ring(priv); -} - -static int hns3_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *param) -{ - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - bool if_running = netif_running(ndev); - u32 old_desc_num, new_desc_num; - int ret; - - if (param->rx_mini_pending || param->rx_jumbo_pending) - return -EINVAL; - - if (param->tx_pending != param->rx_pending) { - netdev_err(ndev, - "Descriptors of tx and rx must be equal"); - return -EINVAL; - } - - if (param->tx_pending > HNS3_RING_MAX_PENDING || - param->tx_pending < HNS3_RING_MIN_PENDING) { - netdev_err(ndev, - "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n", - param->tx_pending, HNS3_RING_MIN_PENDING, - HNS3_RING_MAX_PENDING); - return -EINVAL; - } - - new_desc_num = param->tx_pending; - - /* Hardware requires that its descriptors must be multiple of eight */ - new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE); - old_desc_num = h->kinfo.num_desc; - if (old_desc_num == new_desc_num) - return 0; - - netdev_info(ndev, - "Changing descriptor count from %d to %d.\n", - old_desc_num, new_desc_num); - - if (if_running) - dev_close(ndev); - - ret = hns3_uninit_all_ring(priv); - if (ret) - return ret; - - ret = hns3_change_all_ring_bd_num(priv, new_desc_num); - if (ret) { - ret = hns3_change_all_ring_bd_num(priv, old_desc_num); - if (ret) { - netdev_err(ndev, - "Revert to old bd num fail, ret=%d.\n", ret); - return ret; - } - } - - if (if_running) - ret = dev_open(ndev); - - return ret; -} - -static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple) - return -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - return h->ae_algo->ops->set_rss_tuple(h, cmd); - default: - return -EOPNOTSUPP; - } -} - -static int hns3_nway_reset(struct net_device *netdev) -{ - struct phy_device *phy = netdev->phydev; - - if (!netif_running(netdev)) - return 0; - - /* Only support nway_reset for netdev with phy attached for now */ - if (!phy) - return -EOPNOTSUPP; - - if (phy->autoneg != AUTONEG_ENABLE) - return -EINVAL; - - return genphy_restart_aneg(phy); -} - -static const struct ethtool_ops hns3_ethtool_ops = { - .self_test = hns3_self_test, - .get_drvinfo = hns3_get_drvinfo, - .get_link = hns3_get_link, - .get_ringparam = hns3_get_ringparam, - .set_ringparam = hns3_set_ringparam, - .get_pauseparam = hns3_get_pauseparam, - .get_strings = hns3_get_strings, - .get_ethtool_stats = hns3_get_stats, - .get_sset_count = hns3_get_sset_count, - .get_rxnfc = hns3_get_rxnfc, - .set_rxnfc = hns3_set_rxnfc, - .get_rxfh_key_size = hns3_get_rss_key_size, - .get_rxfh_indir_size = hns3_get_rss_indir_size, - .get_rxfh = hns3_get_rss, - .set_rxfh = hns3_set_rss, - .get_link_ksettings = hns3_get_link_ksettings, - .set_link_ksettings = hns3_set_link_ksettings, - .nway_reset = hns3_nway_reset, -}; - -void hns3_ethtool_set_ops(struct net_device *netdev) -{ - netdev->ethtool_ops = &hns3_ethtool_ops; -} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index f5a2a69f8249..31866057bc7d 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -310,6 +310,7 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) nic->ae_algo = &ae_algovf; nic->pdev = hdev->pdev; nic->numa_node_mask = hdev->numa_node_mask; + nic->flags |= HNAE3_SUPPORT_VF; if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
From dde1a86e93cadf9b17ec0a95a78c99505c48fd83 Mon Sep 17 00:00:00 2001 From: Salil Mehta Date: Thu, 14 Dec 2017 18:03:07 +0000 Subject: net: hns3: Add mailbox support to PF driver
The command queue provides a mailbox mechanism that can be used for communication between the PF and its VFs. The PF handles messages from the VFs that fetch information such as queue, VLAN and link status, and it also handles requests from the VFs to perform certain privileged operations. This patch adds a message handler to service these command requests from the VFs.
Signed-off-by: Salil Mehta Signed-off-by: lipeng Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/Makefile | 3 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 304 +++++++++++++++++++++ 4 files changed, 309 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index d077fa02b66e..cb8ddd043476 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0+ # # Makefile for the HISILICON network device drivers. # @@ -5,6 +6,6 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d07c700c7ff8..980fcdf68808 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -21,6 +21,7 @@ #include "hclge_cmd.h" #include "hclge_dcb.h" #include "hclge_main.h" +#include "hclge_mbx.h" #include "hclge_mdio.h" #include "hclge_tm.h" #include "hnae3.h" diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index aacec438b933..028817c7d67c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -554,4 +554,6 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); + +void hclge_mbx_handler(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c new file mode 100644 index 000000000000..24b1f14133b4 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited.
+ +#include "hclge_main.h" +#include "hclge_mbx.h" +#include "hnae3.h" + +/* hclge_gen_resp_to_vf: used to generate a synchronous response to the VF when + * the PF receives a mailbox message from it. + * @vport: pointer to struct hclge_vport + * @vf_to_pf_req: pointer to the hclge_mbx_vf_to_pf_cmd of the original mailbox + * message + * @resp_status: indicates to the VF whether its request succeeded (0) or + * failed + */ +static int hclge_gen_resp_to_vf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req, + int resp_status, + u8 *resp_data, u16 resp_data_len) +{ + struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; + struct hclge_dev *hdev = vport->back; + enum hclge_cmd_status status; + struct hclge_desc desc; + + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; + + if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { + dev_err(&hdev->pdev->dev, + "PF fail to gen resp to VF len %d exceeds max len %d\n", + resp_data_len, + HCLGE_MBX_MAX_RESP_DATA_SIZE); + } + + hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); + + resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid; + resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; + + resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP; + resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0]; + resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1]; + resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1; + + if (resp_data && resp_data_len > 0) + memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "PF failed(=%d) to send response to VF\n", status); + + return status; +} + +static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, + u16 mbx_opcode, u8 dest_vfid) +{ + struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; + struct hclge_dev *hdev = vport->back; + enum hclge_cmd_status status; + struct hclge_desc desc; + + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); + + resp_pf_to_vf->dest_vfid = dest_vfid; + resp_pf_to_vf->msg_len = msg_len; + resp_pf_to_vf->msg[0] = mbx_opcode; + + memcpy(&resp_pf_to_vf->msg[1], msg, msg_len); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "PF failed(=%d) to send mailbox message to VF\n", + status); + + return status; +} + +static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req) +{ + bool en = req->msg[1] ? 
true : false; + struct hclge_promisc_param param; + + /* always enable broadcast promisc bit */ + hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); + return hclge_cmd_set_promisc_mode(vport->back, ¶m); +} + +static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ + const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); + struct hclge_dev *hdev = vport->back; + int status; + + if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) { + const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]); + + hclge_rm_uc_addr_common(vport, old_addr); + status = hclge_add_uc_addr_common(vport, mac_addr); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) { + status = hclge_add_uc_addr_common(vport, mac_addr); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { + status = hclge_rm_uc_addr_common(vport, mac_addr); + } else { + dev_err(&hdev->pdev->dev, + "failed to set unicast mac addr, unknown subcode %d\n", + mbx_req->msg[1]); + return -EIO; + } + + if (gen_resp) + hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); + + return 0; +} + +static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ + const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); + struct hclge_dev *hdev = vport->back; + int status; + + if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) { + status = hclge_add_mc_addr_common(vport, mac_addr); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { + status = hclge_rm_mc_addr_common(vport, mac_addr); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) { + u8 func_id = vport->vport_id; + bool enable = mbx_req->msg[2]; + + status = hclge_cfg_func_mta_filter(hdev, func_id, enable); + } else { + dev_err(&hdev->pdev->dev, + "failed to set mcast mac addr, unknown subcode %d\n", + mbx_req->msg[1]); + return -EIO; + } + + if (gen_resp) + hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); + + return 0; +} + +static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ + struct hclge_dev *hdev = vport->back; + int status = 0; + + if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) { + u16 vlan, proto; + bool is_kill; + + is_kill = !!mbx_req->msg[2]; + memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan)); + memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); + status = hclge_set_vf_vlan_common(hdev, vport->vport_id, + is_kill, vlan, 0, + cpu_to_be16(proto)); + } + + if (gen_resp) + status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); + + return status; +} + +static int hclge_get_vf_tcinfo(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map, + sizeof(u8)); + + return ret; +} + +static int hclge_get_vf_queue_info(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ +#define HCLGE_TQPS_RSS_INFO_LEN 8 + u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN]; + struct hclge_dev *hdev = vport->back; + + /* get the queue related info */ + memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16)); + memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16)); + memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16)); + memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16)); + + return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, + HCLGE_TQPS_RSS_INFO_LEN); +} + +static int hclge_get_link_info(struct 
hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + u16 link_status; + u8 msg_data[2]; + u8 dest_vfid; + + /* mac.link can only be 0 or 1 */ + link_status = (u16)hdev->hw.mac.link; + memcpy(&msg_data[0], &link_status, sizeof(u16)); + dest_vfid = mbx_req->mbx_src_vfid; + + /* send this requested info to VF */ + return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), + HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); +} + +void hclge_mbx_handler(struct hclge_dev *hdev) +{ + struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclge_vport *vport; + struct hclge_desc *desc; + int ret; + + /* handle all the mailbox requests in the queue */ + while (hnae_get_bit(crq->desc[crq->next_to_use].flag, + HCLGE_CMDQ_RX_OUTVLD_B)) { + desc = &crq->desc[crq->next_to_use]; + req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; + + vport = &hdev->vport[req->mbx_src_vfid]; + + switch (req->msg[0]) { + case HCLGE_MBX_SET_PROMISC_MODE: + ret = hclge_set_vf_promisc_mode(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to set VF promisc mode\n", + ret); + break; + case HCLGE_MBX_SET_UNICAST: + ret = hclge_set_vf_uc_mac_addr(vport, req, false); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to set VF UC MAC Addr\n", + ret); + break; + case HCLGE_MBX_SET_MULTICAST: + ret = hclge_set_vf_mc_mac_addr(vport, req, false); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to set VF MC MAC Addr\n", + ret); + break; + case HCLGE_MBX_SET_VLAN: + ret = hclge_set_vf_vlan_cfg(vport, req, false); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to config VF's VLAN\n", + ret); + break; + case HCLGE_MBX_GET_QINFO: + ret = hclge_get_vf_queue_info(vport, req, true); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to get Q info for VF\n", + ret); + break; + case HCLGE_MBX_GET_TCINFO: + ret = hclge_get_vf_tcinfo(vport, req, true); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to get TC info for VF\n", + ret); + break; + case HCLGE_MBX_GET_LINK_STATUS: + ret = hclge_get_link_info(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to get link stat for VF\n", + ret); + break; + default: + dev_err(&hdev->pdev->dev, + "un-supported mailbox message, code = %d\n", + req->msg[0]); + break; + } + hclge_mbx_ring_ptr_move_crq(crq); + } + + /* Write back the CMDQ_RQ header pointer; the M7 needs this pointer */ + hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use); +}
From 84e095d64ed974bd46351650fc8188d372b89fde Mon Sep 17 00:00:00 2001 From: Salil Mehta Date: Thu, 14 Dec 2017 18:03:08 +0000 Subject: net: hns3: Change PF to add ring-vect binding & resetQ to mailbox
This patch adds support for the ring-vector binding and TQP reset operations that the VF driver requests from the PF driver. The mailbox handler is extended with the corresponding VF commands/messages to service these requests.
Signed-off-by: Salil Mehta Signed-off-by: lipeng Signed-off-by: David S. Miller
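As context for the hunks below: the VF encodes each ring-to-vector binding request in the mailbox msg[] array, using the layout documented at hclge_get_ring_chain_from_mbx() in this patch (msg[0] opcode, msg[1] vector id, msg[2] ring count, then one 3-byte stride per ring starting at msg[3]). A minimal VF-side sketch of packing such a request; the helper name and parameters are hypothetical, only the layout comes from the patch:

	static void vf_pack_ring_map_req(u8 *msg, u8 vector_id, u8 ring_num,
					 const u8 *ring_type, const u8 *tqp_id)
	{
		int i;

		msg[0] = HCLGE_MBX_MAP_RING_TO_VECTOR;	/* or the UNMAP opcode on teardown */
		msg[1] = vector_id;	/* vector the rings get bound to */
		msg[2] = ring_num;	/* number of (type, queue) nodes that follow */

		/* each node occupies a 3-byte stride beginning at msg[3] */
		for (i = 0; i < ring_num; i++) {
			msg[3 * i + 3] = ring_type[i];	/* TX or RX */
			msg[3 * i + 4] = tqp_id[i];	/* queue id within the VF */
		}
	}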
--- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 138 +++++++-------------- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 7 +- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 106 ++++++++++++++++ 3 files changed, 159 insertions(+), 92 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 980fcdf68808..3b1fc493b318 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3256,49 +3256,48 @@ err: return ret; } -int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, - struct hnae3_ring_chain_node *ring_chain) +int hclge_bind_ring_with_vector(struct hclge_vport *vport, + int vector_id, bool en, + struct hnae3_ring_chain_node *ring_chain) { struct hclge_dev *hdev = vport->back; - struct hclge_ctrl_vector_chain_cmd *req; struct hnae3_ring_chain_node *node; struct hclge_desc desc; - int ret; + struct hclge_ctrl_vector_chain_cmd *req + = (struct hclge_ctrl_vector_chain_cmd *)desc.data; + enum hclge_cmd_status status; + enum hclge_opcode_type op; + u16 tqp_type_and_id; int i; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false); - - req = (struct hclge_ctrl_vector_chain_cmd *)desc.data; + op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; + hclge_cmd_setup_basic_desc(&desc, op, false); req->int_vector_id = vector_id; i = 0; for (node = ring_chain; node; node = node->next) { - u16 type_and_id = 0; - - hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, + tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); + hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, - node->tqp_index); - hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - req->tqp_type_and_id[i] = cpu_to_le16(type_and_id); - req->vfid = vport->vport_id; - + hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; + req->vfid = vport->vport_id; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { dev_err(&hdev->pdev->dev, "Map TQP fail, status is %d.\n", - ret); - return ret; + status); + return -EIO; } i = 0; hclge_cmd_setup_basic_desc(&desc, - HCLGE_OPC_ADD_RING_TO_VECTOR, + op, false); req->int_vector_id = vector_id; } @@ -3306,21 +3305,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, if (i > 0) { req->int_cause_num = i; - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + req->vfid = vport->vport_id; + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { dev_err(&hdev->pdev->dev, - "Map TQP fail, status is %d.\n", ret); - return ret; + "Map TQP fail, status is %d.\n", status); + return -EIO; } } return 0; } -static int hclge_map_handle_ring_to_vector( - struct hnae3_handle *handle, int vector, - struct hnae3_ring_chain_node *ring_chain) +static int hclge_map_ring_to_vector(struct hnae3_handle *handle, + int vector, + struct hnae3_ring_chain_node *ring_chain) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -3329,24 +3328,20 @@ static int 
hclge_map_handle_ring_to_vector( vector_id = hclge_get_vector_index(hdev, vector); if (vector_id < 0) { dev_err(&hdev->pdev->dev, - "Get vector index fail. ret =%d\n", vector_id); + "Get vector index fail. vector_id =%d\n", vector_id); return vector_id; } - return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain); + return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); } -static int hclge_unmap_ring_from_vector( - struct hnae3_handle *handle, int vector, - struct hnae3_ring_chain_node *ring_chain) +static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, + int vector, + struct hnae3_ring_chain_node *ring_chain) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct hclge_ctrl_vector_chain_cmd *req; - struct hnae3_ring_chain_node *node; - struct hclge_desc desc; - int i, vector_id; - int ret; + int vector_id, ret; vector_id = hclge_get_vector_index(hdev, vector); if (vector_id < 0) { @@ -3355,54 +3350,17 @@ static int hclge_unmap_ring_from_vector( return vector_id; } - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false); - - req = (struct hclge_ctrl_vector_chain_cmd *)desc.data; - req->int_vector_id = vector_id; - - i = 0; - for (node = ring_chain; node; node = node->next) { - u16 type_and_id = 0; - - hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, - node->tqp_index); - hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - - req->tqp_type_and_id[i] = cpu_to_le16(type_and_id); - req->vfid = vport->vport_id; - - if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { - req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Unmap TQP fail, status is %d.\n", - ret); - return ret; - } - i = 0; - hclge_cmd_setup_basic_desc(&desc, - HCLGE_OPC_DEL_RING_TO_VECTOR, - false); - req->int_vector_id = vector_id; - } + ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); + if (ret) { + dev_err(&handle->pdev->dev, + "Unmap ring from vector fail. 
vectorid=%d, ret =%d\n", + vector_id, + ret); + return ret; } - if (i > 0) { - req->int_cause_num = i; - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Unmap TQP fail, status is %d.\n", ret); - return ret; - } - } + /* Free this MSIX or MSI vector */ + hclge_free_vector(hdev, vector_id); return 0; } @@ -4423,7 +4381,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); } -static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -4995,8 +4953,8 @@ static const struct hnae3_ae_ops hclge_ops = { .uninit_ae_dev = hclge_uninit_ae_dev, .init_client_instance = hclge_init_client_instance, .uninit_client_instance = hclge_uninit_client_instance, - .map_ring_to_vector = hclge_map_handle_ring_to_vector, - .unmap_ring_from_vector = hclge_unmap_ring_from_vector, + .map_ring_to_vector = hclge_map_ring_to_vector, + .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, .get_vector = hclge_get_vector, .set_promisc_mode = hclge_set_promisc_mode, .set_loopback = hclge_set_loopback, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 028817c7d67c..c7c9a28b97ca 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -539,8 +539,10 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, u8 func_id, bool enable); struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); -int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector, - struct hnae3_ring_chain_node *ring_chain); +int hclge_bind_ring_with_vector(struct hclge_vport *vport, + int vector_id, bool en, + struct hnae3_ring_chain_node *ring_chain); + static inline int hclge_get_queue_id(struct hnae3_queue *queue) { struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q); @@ -556,4 +558,5 @@ int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); void hclge_mbx_handler(struct hclge_dev *hdev); +void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 24b1f14133b4..96f453ff84b5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -79,6 +79,91 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, return status; } +static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) +{ + struct hnae3_ring_chain_node *chain_tmp, *chain; + + chain = head->next; + + while (chain) { + chain_tmp = chain->next; + kzfree(chain); + chain = chain_tmp; + } +} + +/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message + * msg[0]: opcode + * msg[1]: + * msg[2]: ring_num + * msg[3]: first ring type (TX|RX) + * msg[4]: first tqp id + * msg[5] ~ msg[14]: other ring type and tqp id + */ +static int hclge_get_ring_chain_from_mbx( + struct hclge_mbx_vf_to_pf_cmd *req, + struct hnae3_ring_chain_node *ring_chain, + struct hclge_vport *vport) +{ +#define HCLGE_RING_NODE_VARIABLE_NUM 3 +#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM 3 + struct hnae3_ring_chain_node *cur_chain, *new_chain; + 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 24b1f14133b4..96f453ff84b5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -79,6 +79,91 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
 	return status;
 }
 
+static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+{
+	struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+	chain = head->next;
+
+	while (chain) {
+		chain_tmp = chain->next;
+		kzfree(chain);
+		chain = chain_tmp;
+	}
+}
+
+/* hclge_get_ring_chain_from_mbx: get ring type & tqp id from mailbox message
+ * msg[0]: opcode
+ * msg[1]: vector id
+ * msg[2]: ring_num
+ * msg[3]: first ring type (TX|RX)
+ * msg[4]: first tqp id
+ * msg[5] ~ msg[14]: other ring type and tqp id
+ */
+static int hclge_get_ring_chain_from_mbx(
+			struct hclge_mbx_vf_to_pf_cmd *req,
+			struct hnae3_ring_chain_node *ring_chain,
+			struct hclge_vport *vport)
+{
+#define HCLGE_RING_NODE_VARIABLE_NUM		3
+#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM	3
+	struct hnae3_ring_chain_node *cur_chain, *new_chain;
+	int ring_num;
+	int i;
+
+	ring_num = req->msg[2];
+
+	hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+	ring_chain->tqp_index =
+			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+
+	cur_chain = ring_chain;
+
+	for (i = 1; i < ring_num; i++) {
+		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
+		if (!new_chain)
+			goto err;
+
+		hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+			     req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+			     HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]);
+
+		new_chain->tqp_index =
+		hclge_get_queue_id(vport->nic.kinfo.tqp
+			[req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+			HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]);
+
+		cur_chain->next = new_chain;
+		cur_chain = new_chain;
+	}
+
+	return 0;
+err:
+	hclge_free_vector_ring_chain(ring_chain);
+	return -ENOMEM;
+}
+
+static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+					     struct hclge_mbx_vf_to_pf_cmd *req)
+{
+	struct hnae3_ring_chain_node ring_chain;
+	int vector_id = req->msg[1];
+	int ret;
+
+	memset(&ring_chain, 0, sizeof(ring_chain));
+	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+	if (ret)
+		return ret;
+
+	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+	if (ret)
+		return ret;
+
+	hclge_free_vector_ring_chain(&ring_chain);
+
+	return 0;
+}
+
 static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
 				     struct hclge_mbx_vf_to_pf_cmd *req)
 {
@@ -224,6 +309,16 @@ static int hclge_get_link_info(struct hclge_vport *vport,
 				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
 }
 
+static void hclge_reset_vf_queue(struct hclge_vport *vport,
+				 struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	u16 queue_id;
+
+	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+
+	hclge_reset_tqp(&vport->nic, queue_id);
+}
+
 void hclge_mbx_handler(struct hclge_dev *hdev)
 {
 	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -241,6 +336,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 		vport = &hdev->vport[req->mbx_src_vfid];
 
 		switch (req->msg[0]) {
+		case HCLGE_MBX_MAP_RING_TO_VECTOR:
+			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+								req);
+			break;
+		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
+			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
+								req);
+			break;
 		case HCLGE_MBX_SET_PROMISC_MODE:
 			ret = hclge_set_vf_promisc_mode(vport, req);
 			if (ret)
@@ -290,6 +393,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 				"PF fail(%d) to get link stat for VF\n",
 				ret);
 			break;
+		case HCLGE_MBX_QUEUE_RESET:
+			hclge_reset_vf_queue(vport, req);
+			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"un-supported mailbox message, code = %d\n",
-- cgit
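A worked example of the mailbox layout parsed by hclge_get_ring_chain_from_mbx() above: with a per-ring stride of HCLGE_RING_NODE_VARIABLE_NUM (3) and a basic-message offset of 3, ring i carries its type in msg[3 * i + 3] and its tqp id in msg[3 * i + 4], so ring 0 uses msg[3]/msg[4] and ring 1 uses msg[6]/msg[7]. A hypothetical VF-side encoder that would produce this layout (illustrative only; it assumes the tqp ids fit the u8 message slots and that the chain fits within msg[14]):

/* Hypothetical encoder matching the PF parser above; not driver code. */
static void demo_fill_ring_msg(u8 *msg, u8 opcode, u8 vector_id,
			       struct hnae3_ring_chain_node *chain)
{
	struct hnae3_ring_chain_node *node;
	u8 i = 0;

	msg[0] = opcode;	/* HCLGE_MBX_MAP/UNMAP_RING_TO_VECTOR */
	msg[1] = vector_id;	/* consumed by the map/unmap handler */

	for (node = chain; node; node = node->next, i++) {
		msg[3 * i + 3] = hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
		msg[3 * i + 4] = (u8)node->tqp_index;
	}

	msg[2] = i;		/* ring_num */
}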
Miller

---
 .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 68 +++++++++++++++++++---
 .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h |  8 ++-
 2 files changed, 68 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3b1fc493b318..e97fd6654e5e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2227,6 +2227,12 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 	return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
 }
 
+static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+{
+	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+		schedule_work(&hdev->mbx_service_task);
+}
+
 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
 {
 	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
@@ -2372,9 +2378,18 @@ static void hclge_service_complete(struct hclge_dev *hdev)
 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 {
 	u32 rst_src_reg;
+	u32 cmdq_src_reg;
 
 	/* fetch the events from their corresponding regs */
 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+
+	/* Assumption: if reset and mailbox events are reported together,
+	 * we will only process the reset event in this go and will defer
+	 * the processing of the mailbox events. Since we have not cleared
+	 * the RX CMDQ event this time, we will receive another interrupt
+	 * from H/W just for the mailbox.
+	 */
 
 	/* check for vector0 reset event sources */
 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
@@ -2395,7 +2410,12 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 		return HCLGE_VECTOR0_EVENT_RST;
 	}
 
-	/* mailbox event sharing vector 0 interrupt would be placed here */
+	/* check for vector0 mailbox(=CMDQ RX) event source */
+	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
+		*clearval = cmdq_src_reg;
+		return HCLGE_VECTOR0_EVENT_MBX;
+	}
 
 	return HCLGE_VECTOR0_EVENT_OTHER;
 }
@@ -2403,10 +2423,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
 				    u32 regclr)
 {
-	if (event_type == HCLGE_VECTOR0_EVENT_RST)
+	switch (event_type) {
+	case HCLGE_VECTOR0_EVENT_RST:
 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
-
-	/* mailbox event sharing vector 0 interrupt would be placed here */
+		break;
+	case HCLGE_VECTOR0_EVENT_MBX:
+		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
+		break;
+	}
 }
 
 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
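Taken together, hclge_check_event_cause() and hclge_clear_event_cause() form a fetch/acknowledge pair around the shared vector-0 status registers. A condensed sketch of the ISR protocol they support (the actual handler appears in the next hunk; this is not driver code):

/* Condensed sketch of the vector-0 ISR flow built on the helpers above. */
static irqreturn_t demo_vector0_isr(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 clearval = 0;
	u32 event;

	/* mask vector 0 while the cause is being examined */
	hclge_enable_vector(&hdev->misc_vector, false);

	event = hclge_check_event_cause(hdev, &clearval);

	/* real work is deferred to task context in both cases */
	if (event == HCLGE_VECTOR0_EVENT_RST)
		hclge_reset_task_schedule(hdev);
	else if (event == HCLGE_VECTOR0_EVENT_MBX)
		hclge_mbx_task_schedule(hdev);

	/* ack only the consumed bits; an unacked mailbox bit re-fires
	 * the interrupt once the higher-priority reset has been taken
	 */
	if (event != HCLGE_VECTOR0_EVENT_OTHER)
		hclge_clear_event_cause(hdev, event, clearval);

	/* unmask */
	hclge_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}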
@@ -2423,13 +2447,23 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
 	hclge_enable_vector(&hdev->misc_vector, false);
 	event_cause = hclge_check_event_cause(hdev, &clearval);
 
-	/* vector 0 interrupt is shared with reset and mailbox source events.
-	 * For now, we are not handling mailbox events.
-	 */
+	/* vector 0 interrupt is shared with reset and mailbox source events.*/
 	switch (event_cause) {
 	case HCLGE_VECTOR0_EVENT_RST:
 		hclge_reset_task_schedule(hdev);
 		break;
+	case HCLGE_VECTOR0_EVENT_MBX:
+		/* If we are here then,
+		 * 1. Either we are not handling any mbx task and we are not
+		 *    scheduled as well
+		 *                        OR
+		 * 2. We could be handling a mbx task but nothing more is
+		 *    scheduled.
+		 * In both cases, we should schedule mbx task as there are more
+		 * mbx messages reported by this interrupt.
+		 */
+		hclge_mbx_task_schedule(hdev);
+
 	default:
 		dev_dbg(&hdev->pdev->dev,
 			"received unknown or unhandled event of vector0\n");
@@ -2708,6 +2742,21 @@ static void hclge_reset_service_task(struct work_struct *work)
 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
 }
 
+static void hclge_mailbox_service_task(struct work_struct *work)
+{
+	struct hclge_dev *hdev =
+		container_of(work, struct hclge_dev, mbx_service_task);
+
+	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+		return;
+
+	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+	hclge_mbx_handler(hdev);
+
+	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
 static void hclge_service_task(struct work_struct *work)
 {
 	struct hclge_dev *hdev =
@@ -4815,6 +4864,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
 	INIT_WORK(&hdev->service_task, hclge_service_task);
 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
+	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
 
 	/* Enable MISC vector(vector0) */
 	hclge_enable_vector(&hdev->misc_vector, true);
@@ -4823,6 +4873,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
 
 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
 	return 0;
@@ -4936,6 +4988,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 		cancel_work_sync(&hdev->service_task);
 	if (hdev->rst_service_task.func)
 		cancel_work_sync(&hdev->rst_service_task);
+	if (hdev->mbx_service_task.func)
+		cancel_work_sync(&hdev->mbx_service_task);
 
 	if (mac->phydev)
 		mdiobus_unregister(mac->mdio_bus);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index c7c9a28b97ca..fb043b54583d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -92,6 +92,11 @@
 #define HCLGE_VECTOR0_CORERESET_INT_B	6
 #define HCLGE_VECTOR0_IMPRESET_INT_B	7
 
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_VECTOR0_CMDQ_SRC_REG	0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGE_VECTOR0_RX_CMDQ_INT_B	1
+
 enum HCLGE_DEV_STATE {
 	HCLGE_STATE_REINITING,
 	HCLGE_STATE_DOWN,
@@ -101,8 +106,8 @@ enum HCLGE_DEV_STATE {
 	HCLGE_STATE_SERVICE_SCHED,
 	HCLGE_STATE_RST_SERVICE_SCHED,
 	HCLGE_STATE_RST_HANDLING,
+	HCLGE_STATE_MBX_SERVICE_SCHED,
 	HCLGE_STATE_MBX_HANDLING,
-	HCLGE_STATE_MBX_IRQ,
 	HCLGE_STATE_MAX
 };
 
@@ -479,6 +484,7 @@ struct hclge_dev {
 	struct timer_list service_timer;
 	struct work_struct service_task;
 	struct work_struct rst_service_task;
+	struct work_struct mbx_service_task;
 	bool cur_promisc;
 	int num_alloc_vfs;	/* Actual number of VFs allocated */
-- cgit
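The HCLGE_STATE_MBX_SERVICE_SCHED and HCLGE_STATE_MBX_HANDLING bits above implement a small schedule/handle handshake: the first dedupes queued work, the second serializes execution, and clearing the scheduled bit before draining the mailbox lets an interrupt that lands mid-run re-queue the task instead of being lost. A standalone sketch of the same pattern, with hypothetical names:

/* Standalone sketch of the schedule/handle handshake; not driver code. */
#include <linux/workqueue.h>
#include <linux/bitops.h>

#define ST_SCHED	0	/* work is queued but not yet running */
#define ST_HANDLING	1	/* handler body is executing */

struct mbx_ctx {
	unsigned long state;
	struct work_struct work;
};

static void mbx_schedule(struct mbx_ctx *ctx)
{
	/* only one queue attempt can win until the worker clears ST_SCHED */
	if (!test_and_set_bit(ST_SCHED, &ctx->state))
		schedule_work(&ctx->work);
}

static void mbx_work_fn(struct work_struct *work)
{
	struct mbx_ctx *ctx = container_of(work, struct mbx_ctx, work);

	if (test_and_set_bit(ST_HANDLING, &ctx->state))
		return;		/* another instance is already running */

	/* clear ST_SCHED *before* handling: an IRQ arriving during the
	 * handler can schedule the work again instead of being lost
	 */
	clear_bit(ST_SCHED, &ctx->state);

	/* ... drain the mailbox CRQ here (hclge_mbx_handler in the patch) */

	clear_bit(ST_HANDLING, &ctx->state);
}

One detail worth noting in the interrupt-handler hunk above: after hclge_mbx_task_schedule(), the HCLGE_VECTOR0_EVENT_MBX case appears to fall through into the default dev_dbg(). This is harmless at debug log level, but an explicit break would state the intent more clearly.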