Diffstat (limited to 'drivers/net/ethernet/hisilicon/hibmcge')
18 files changed, 2496 insertions, 0 deletions
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
new file mode 100644
index 000000000000..7ea15f9ef849
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON BMC GE network device drivers.
+#
+
+obj-$(CONFIG_HIBMCGE) += hibmcge.o
+
+hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
+	       hbg_debugfs.o hbg_err.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
new file mode 100644
index 000000000000..b4300d8ea4ad
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_COMMON_H
+#define __HBG_COMMON_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include "hbg_reg.h"
+
+#define HBG_STATUS_DISABLE		0x0
+#define HBG_STATUS_ENABLE		0x1
+#define HBG_RX_SKIP1			0x00
+#define HBG_RX_SKIP2			0x01
+#define HBG_VECTOR_NUM			4
+#define HBG_PCU_CACHE_LINE_SIZE		32
+#define HBG_TX_TIMEOUT_BUF_LEN		1024
+#define HBG_RX_DESCR			0x01
+
+#define HBG_PACKET_HEAD_SIZE	((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \
+				  HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
+
+enum hbg_dir {
+	HBG_DIR_TX = 1 << 0,
+	HBG_DIR_RX = 1 << 1,
+	HBG_DIR_TX_RX = HBG_DIR_TX | HBG_DIR_RX,
+};
+
+enum hbg_tx_state {
+	HBG_TX_STATE_COMPLETE = 0, /* clear state, must fix to 0 */
+	HBG_TX_STATE_START,
+};
+
+enum hbg_nic_state {
+	HBG_NIC_STATE_EVENT_HANDLING = 0,
+	HBG_NIC_STATE_RESETTING,
+	HBG_NIC_STATE_RESET_FAIL,
+};
+
+enum hbg_reset_type {
+	HBG_RESET_TYPE_NONE = 0,
+	HBG_RESET_TYPE_FLR,
+	HBG_RESET_TYPE_FUNCTION,
+};
+
+struct hbg_buffer {
+	u32 state;
+	dma_addr_t state_dma;
+
+	struct sk_buff *skb;
+	dma_addr_t skb_dma;
+	u32 skb_len;
+
+	enum hbg_dir dir;
+	struct hbg_ring *ring;
+	struct hbg_priv *priv;
+};
+
+struct hbg_ring {
+	struct hbg_buffer *queue;
+	dma_addr_t queue_dma;
+
+	union {
+		u32 head;
+		u32 ntc;
+	};
+	union {
+		u32 tail;
+		u32 ntu;
+	};
+	u32 len;
+
+	enum hbg_dir dir;
+	struct hbg_priv *priv;
+	struct napi_struct napi;
+	char *tout_log_buf; /* tx timeout log buffer */
+};
+
+enum hbg_hw_event_type {
+	HBG_HW_EVENT_NONE = 0,
+	HBG_HW_EVENT_INIT, /* driver is loading */
+	HBG_HW_EVENT_RESET,
+};
+
+struct hbg_dev_specs {
+	u32 mac_id;
+	struct sockaddr mac_addr;
+	u32 phy_addr;
+	u32 mdio_frequency;
+	u32 rx_fifo_num;
+	u32 tx_fifo_num;
+	u32 vlan_layers;
+	u32 max_mtu;
+	u32 min_mtu;
+	u32 uc_mac_num;
+
+	u32 max_frame_len;
+	u32 rx_buf_size;
+};
+
+struct hbg_irq_info {
+	const char *name;
+	u32 mask;
+	bool re_enable;
+	bool need_print;
+	u64 count;
+
+	void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info);
+};
+
+struct hbg_vector {
+	char name[HBG_VECTOR_NUM][32];
+	struct hbg_irq_info *info_array;
+	u32 info_array_len;
+};
+
+struct hbg_mac {
+	struct mii_bus *mdio_bus;
+	struct phy_device *phydev;
+	u8 phy_addr;
+
+	u32 speed;
+	u32 duplex;
+	u32 autoneg;
+	u32 link_status;
+	u32 pause_autoneg;
+};
+
+struct hbg_mac_table_entry {
+	u8 addr[ETH_ALEN];
+};
+
+struct hbg_mac_filter {
+	struct hbg_mac_table_entry *mac_table;
+	u32 table_max_len;
+	bool enabled;
+};
+
+/* saved for restore after rest */
+struct hbg_user_def {
+	struct ethtool_pauseparam pause_param;
+};
+
+struct hbg_priv {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	u8 __iomem *io_base;
+	struct hbg_dev_specs dev_specs;
+	unsigned long state;
+	struct hbg_mac mac;
+	struct hbg_vector vectors;
+	struct hbg_ring tx_ring;
+	struct hbg_ring rx_ring;
+	struct hbg_mac_filter filter;
+	enum hbg_reset_type reset_type;
+	struct hbg_user_def user_def;
+};
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
new file mode 100644
index 000000000000..8473c43d171a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+#include "hbg_common.h"
+#include "hbg_debugfs.h"
+#include "hbg_hw.h"
+#include "hbg_irq.h"
+#include "hbg_txrx.h"
+
+static struct dentry *hbg_dbgfs_root;
+
+struct hbg_dbg_info {
+	const char *name;
+	int (*read)(struct seq_file *seq, void *data);
+};
+
+#define state_str_true_false(p, s) str_true_false(test_bit(s, &(p)->state))
+
+static void hbg_dbg_ring(struct hbg_priv *priv, struct hbg_ring *ring,
+			 struct seq_file *s)
+{
+	u32 irq_mask = ring->dir == HBG_DIR_TX ? HBG_INT_MSK_TX_B :
+						 HBG_INT_MSK_RX_B;
+
+	seq_printf(s, "ring used num: %u\n",
+		   hbg_get_queue_used_num(ring));
+	seq_printf(s, "ring max num: %u\n", ring->len);
+	seq_printf(s, "ring head: %u, tail: %u\n", ring->head, ring->tail);
+	seq_printf(s, "fifo used num: %u\n",
+		   hbg_hw_get_fifo_used_num(priv, ring->dir));
+	seq_printf(s, "fifo max num: %u\n",
+		   hbg_get_spec_fifo_max_num(priv, ring->dir));
+	seq_printf(s, "irq enabled: %s\n",
+		   str_true_false(hbg_hw_irq_is_enabled(priv, irq_mask)));
+}
+
+static int hbg_dbg_tx_ring(struct seq_file *s, void *unused)
+{
+	struct net_device *netdev = dev_get_drvdata(s->private);
+	struct hbg_priv *priv = netdev_priv(netdev);
+
+	hbg_dbg_ring(priv, &priv->tx_ring, s);
+	return 0;
+}
+
+static int hbg_dbg_rx_ring(struct seq_file *s, void *unused)
+{
+	struct net_device *netdev = dev_get_drvdata(s->private);
+	struct hbg_priv *priv = netdev_priv(netdev);
+
+	hbg_dbg_ring(priv, &priv->rx_ring, s);
+	return 0;
+}
+
+static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
+{
+	struct net_device *netdev = dev_get_drvdata(s->private);
+	struct hbg_priv *priv = netdev_priv(netdev);
+	struct hbg_irq_info *info;
+	u32 i;
+
+	for (i = 0; i < priv->vectors.info_array_len; i++) {
+		info = &priv->vectors.info_array[i];
+		seq_printf(s,
+			   "%-20s: enabled: %-5s, logged: %-5s, count: %llu\n",
+			   info->name,
+			   str_true_false(hbg_hw_irq_is_enabled(priv,
+								info->mask)),
+			   str_true_false(info->need_print),
+			   info->count);
+	}
+
+	return 0;
+}
+
+static int hbg_dbg_mac_table(struct seq_file *s, void *unused)
+{
+	struct net_device *netdev = dev_get_drvdata(s->private);
+	struct hbg_priv *priv = netdev_priv(netdev);
+	struct hbg_mac_filter *filter;
+	u32 i;
+
+	filter = &priv->filter;
+	seq_printf(s, "mac addr max count: %u\n", filter->table_max_len);
+	seq_printf(s, "filter enabled: %s\n", str_true_false(filter->enabled));
+
+	for (i = 0; i < filter->table_max_len; i++) {
+		if (is_zero_ether_addr(filter->mac_table[i].addr))
+			continue;
+
+		seq_printf(s, "[%u] %pM\n", i, filter->mac_table[i].addr);
+	}
+
+	return 0;
+}
+
+static const char * const reset_type_str[] = {"None", "FLR", "Function"};
+
+static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
+{
+	struct net_device *netdev = dev_get_drvdata(s->private);
+	struct hbg_priv *priv = netdev_priv(netdev);
+ + seq_printf(s, "event handling state: %s\n", + state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING)); + seq_printf(s, "resetting state: %s\n", + state_str_true_false(priv, HBG_NIC_STATE_RESETTING)); + seq_printf(s, "reset fail state: %s\n", + state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL)); + seq_printf(s, "last reset type: %s\n", + reset_type_str[priv->reset_type]); + + return 0; +} + +static const struct hbg_dbg_info hbg_dbg_infos[] = { + { "tx_ring", hbg_dbg_tx_ring }, + { "rx_ring", hbg_dbg_rx_ring }, + { "irq_info", hbg_dbg_irq_info }, + { "mac_table", hbg_dbg_mac_table }, + { "nic_state", hbg_dbg_nic_state }, +}; + +static void hbg_debugfs_uninit(void *data) +{ + debugfs_remove_recursive((struct dentry *)data); +} + +void hbg_debugfs_init(struct hbg_priv *priv) +{ + const char *name = pci_name(priv->pdev); + struct device *dev = &priv->pdev->dev; + struct dentry *root; + u32 i; + + root = debugfs_create_dir(name, hbg_dbgfs_root); + + for (i = 0; i < ARRAY_SIZE(hbg_dbg_infos); i++) + debugfs_create_devm_seqfile(dev, hbg_dbg_infos[i].name, + root, hbg_dbg_infos[i].read); + + /* Ignore the failure because debugfs is not a key feature. */ + devm_add_action_or_reset(dev, hbg_debugfs_uninit, root); +} + +void hbg_debugfs_register(void) +{ + hbg_dbgfs_root = debugfs_create_dir("hibmcge", NULL); +} + +void hbg_debugfs_unregister(void) +{ + debugfs_remove_recursive(hbg_dbgfs_root); + hbg_dbgfs_root = NULL; +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h new file mode 100644 index 000000000000..80670d66bbeb --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_DEBUGFS_H +#define __HBG_DEBUGFS_H + +void hbg_debugfs_register(void); +void hbg_debugfs_unregister(void); + +void hbg_debugfs_init(struct hbg_priv *priv); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c new file mode 100644 index 000000000000..4d1f4a33391a --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. 
+ +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/rtnetlink.h> +#include "hbg_common.h" +#include "hbg_err.h" +#include "hbg_hw.h" + +static void hbg_restore_mac_table(struct hbg_priv *priv) +{ + struct hbg_mac_filter *filter = &priv->filter; + u64 addr; + u32 i; + + for (i = 0; i < filter->table_max_len; i++) + if (!is_zero_ether_addr(filter->mac_table[i].addr)) { + addr = ether_addr_to_u64(filter->mac_table[i].addr); + hbg_hw_set_uc_addr(priv, addr, i); + } + + hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled); +} + +static void hbg_restore_user_def_settings(struct hbg_priv *priv) +{ + struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param; + + hbg_restore_mac_table(priv); + hbg_hw_set_mtu(priv, priv->netdev->mtu); + hbg_hw_set_pause_enable(priv, pause_param->tx_pause, + pause_param->rx_pause); +} + +int hbg_rebuild(struct hbg_priv *priv) +{ + int ret; + + ret = hbg_hw_init(priv); + if (ret) + return ret; + + hbg_restore_user_def_settings(priv); + return 0; +} + +static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type) +{ + int ret; + + ASSERT_RTNL(); + + if (netif_running(priv->netdev)) { + dev_warn(&priv->pdev->dev, + "failed to reset because port is up\n"); + return -EBUSY; + } + + priv->reset_type = type; + set_bit(HBG_NIC_STATE_RESETTING, &priv->state); + clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); + ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET); + if (ret) { + set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); + } + + return ret; +} + +static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type) +{ + int ret; + + if (!test_bit(HBG_NIC_STATE_RESETTING, &priv->state) || + type != priv->reset_type) + return 0; + + ASSERT_RTNL(); + + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); + ret = hbg_rebuild(priv); + if (ret) { + set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); + dev_err(&priv->pdev->dev, "failed to rebuild after reset\n"); + return ret; + } + + dev_info(&priv->pdev->dev, "reset done\n"); + return ret; +} + +/* must be protected by rtnl lock */ +int hbg_reset(struct hbg_priv *priv) +{ + int ret; + + ASSERT_RTNL(); + ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION); + if (ret) + return ret; + + return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION); +} + +static void hbg_pci_err_reset_prepare(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct hbg_priv *priv = netdev_priv(netdev); + + rtnl_lock(); + hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR); +} + +static void hbg_pci_err_reset_done(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct hbg_priv *priv = netdev_priv(netdev); + + hbg_reset_done(priv, HBG_RESET_TYPE_FLR); + rtnl_unlock(); +} + +static const struct pci_error_handlers hbg_pci_err_handler = { + .reset_prepare = hbg_pci_err_reset_prepare, + .reset_done = hbg_pci_err_reset_done, +}; + +void hbg_set_pci_err_handler(struct pci_driver *pdrv) +{ + pdrv->err_handler = &hbg_pci_err_handler; +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h new file mode 100644 index 000000000000..d7828e446308 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. 
*/ + +#ifndef __HBG_ERR_H +#define __HBG_ERR_H + +#include <linux/pci.h> + +void hbg_set_pci_err_handler(struct pci_driver *pdrv); +int hbg_reset(struct hbg_priv *priv); +int hbg_rebuild(struct hbg_priv *priv); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c new file mode 100644 index 000000000000..00364a438ec2 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <linux/ethtool.h> +#include <linux/phy.h> +#include <linux/rtnetlink.h> +#include "hbg_common.h" +#include "hbg_err.h" +#include "hbg_ethtool.h" +#include "hbg_hw.h" + +enum hbg_reg_dump_type { + HBG_DUMP_REG_TYPE_SPEC = 0, + HBG_DUMP_REG_TYPE_MDIO, + HBG_DUMP_REG_TYPE_GMAC, + HBG_DUMP_REG_TYPE_PCU, +}; + +struct hbg_reg_info { + u32 type; + u32 offset; + u32 val; +}; + +#define HBG_DUMP_SPEC_I(offset) {HBG_DUMP_REG_TYPE_SPEC, offset, 0} +#define HBG_DUMP_MDIO_I(offset) {HBG_DUMP_REG_TYPE_MDIO, offset, 0} +#define HBG_DUMP_GMAC_I(offset) {HBG_DUMP_REG_TYPE_GMAC, offset, 0} +#define HBG_DUMP_PCU_I(offset) {HBG_DUMP_REG_TYPE_PCU, offset, 0} + +static const struct hbg_reg_info hbg_dump_reg_infos[] = { + /* dev specs */ + HBG_DUMP_SPEC_I(HBG_REG_SPEC_VALID_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_EVENT_REQ_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MAC_ID_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_PHY_ID_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_HIGH_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_UC_MAC_NUM_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MDIO_FREQ_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MAX_MTU_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_MIN_MTU_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_TX_FIFO_NUM_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_RX_FIFO_NUM_ADDR), + HBG_DUMP_SPEC_I(HBG_REG_VLAN_LAYERS_ADDR), + + /* mdio */ + HBG_DUMP_MDIO_I(HBG_REG_MDIO_COMMAND_ADDR), + HBG_DUMP_MDIO_I(HBG_REG_MDIO_ADDR_ADDR), + HBG_DUMP_MDIO_I(HBG_REG_MDIO_WDATA_ADDR), + HBG_DUMP_MDIO_I(HBG_REG_MDIO_RDATA_ADDR), + HBG_DUMP_MDIO_I(HBG_REG_MDIO_STA_ADDR), + + /* gmac */ + HBG_DUMP_GMAC_I(HBG_REG_DUPLEX_TYPE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_FD_FC_TYPE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_FC_TX_TIMER_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_LOW_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_HIGH_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_MAX_FRAME_SIZE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_PORT_MODE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_PORT_ENABLE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_PAUSE_ENABLE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_AN_NEG_STATE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_TRANSMIT_CTRL_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_REC_FILT_CTRL_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_LINE_LOOP_BACK_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_CF_CRC_STRIP_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_MODE_CHANGE_EN_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_LOOP_REG_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_RECV_CTRL_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_VLAN_CODE_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_0_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_0_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_1_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_1_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_2_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_2_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_3_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_3_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_4_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_4_ADDR), + HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_5_ADDR), + 
HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_5_ADDR), + + /* pcu */ + HBG_DUMP_PCU_I(HBG_REG_TX_FIFO_THRSLD_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_FIFO_THRSLD_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CFG_FIFO_THRSLD_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_MSK_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_STAT_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_CLR_ADDR), + HBG_DUMP_PCU_I(HBG_REG_TX_BUS_ERR_ADDR_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_BUS_ERR_ADDR_ADDR), + HBG_DUMP_PCU_I(HBG_REG_MAX_FRAME_LEN_ADDR), + HBG_DUMP_PCU_I(HBG_REG_DEBUG_ST_MCH_ADDR), + HBG_DUMP_PCU_I(HBG_REG_FIFO_CURR_STATUS_ADDR), + HBG_DUMP_PCU_I(HBG_REG_FIFO_HIST_STATUS_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_CFF_DATA_NUM_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_TX_PAUSE_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_CFF_ADDR_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_BUF_SIZE_ADDR), + HBG_DUMP_PCU_I(HBG_REG_BUS_CTRL_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_CTRL_ADDR), + HBG_DUMP_PCU_I(HBG_REG_RX_PKT_MODE_ADDR), + HBG_DUMP_PCU_I(HBG_REG_DBG_ST0_ADDR), + HBG_DUMP_PCU_I(HBG_REG_DBG_ST1_ADDR), + HBG_DUMP_PCU_I(HBG_REG_DBG_ST2_ADDR), + HBG_DUMP_PCU_I(HBG_REG_BUS_RST_EN_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_MSK_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_STAT_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_CLR_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_MSK_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_STAT_ADDR), + HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_CLR_ADDR), +}; + +static const u32 hbg_dump_type_base_array[] = { + [HBG_DUMP_REG_TYPE_SPEC] = 0, + [HBG_DUMP_REG_TYPE_MDIO] = HBG_REG_MDIO_BASE, + [HBG_DUMP_REG_TYPE_GMAC] = HBG_REG_SGMII_BASE, + [HBG_DUMP_REG_TYPE_PCU] = HBG_REG_SGMII_BASE, +}; + +static int hbg_ethtool_get_regs_len(struct net_device *netdev) +{ + return ARRAY_SIZE(hbg_dump_reg_infos) * sizeof(struct hbg_reg_info); +} + +static void hbg_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *data) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_reg_info *info; + u32 i, offset = 0; + + regs->version = 0; + for (i = 0; i < ARRAY_SIZE(hbg_dump_reg_infos); i++) { + info = data + offset; + + *info = hbg_dump_reg_infos[i]; + info->val = hbg_reg_read(priv, info->offset); + info->offset -= hbg_dump_type_base_array[info->type]; + + offset += sizeof(*info); + } +} + +static void hbg_ethtool_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *param) +{ + struct hbg_priv *priv = netdev_priv(net_dev); + + param->autoneg = priv->mac.pause_autoneg; + hbg_hw_get_pause_enable(priv, ¶m->tx_pause, ¶m->rx_pause); +} + +static int hbg_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *param) +{ + struct hbg_priv *priv = netdev_priv(net_dev); + + priv->mac.pause_autoneg = param->autoneg; + phy_set_asym_pause(priv->mac.phydev, param->rx_pause, param->tx_pause); + + if (!param->autoneg) + hbg_hw_set_pause_enable(priv, param->tx_pause, param->rx_pause); + + priv->user_def.pause_param = *param; + return 0; +} + +static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + if (*flags != ETH_RESET_DEDICATED) + return -EOPNOTSUPP; + + *flags = 0; + return hbg_reset(priv); +} + +static const struct ethtool_ops hbg_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_regs_len = hbg_ethtool_get_regs_len, + .get_regs = hbg_ethtool_get_regs, + .get_pauseparam = hbg_ethtool_get_pauseparam, + .set_pauseparam = hbg_ethtool_set_pauseparam, + 
.reset = hbg_ethtool_reset, + .nway_reset = phy_ethtool_nway_reset, +}; + +void hbg_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &hbg_ethtool_ops; +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h new file mode 100644 index 000000000000..628707ec2686 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_ETHTOOL_H +#define __HBG_ETHTOOL_H + +#include <linux/netdevice.h> + +void hbg_ethtool_set_ops(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c new file mode 100644 index 000000000000..e7798f213645 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/minmax.h> +#include "hbg_common.h" +#include "hbg_hw.h" +#include "hbg_reg.h" + +#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000) +#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000) +/* little endian or big endian. + * ctrl means packet description, data means skb packet data + */ +#define HBG_ENDIAN_CTRL_LE_DATA_BE 0x0 +#define HBG_PCU_FRAME_LEN_PLUS 4 + +static bool hbg_hw_spec_is_valid(struct hbg_priv *priv) +{ + return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) && + !hbg_reg_read(priv, HBG_REG_EVENT_REQ_ADDR); +} + +int hbg_hw_event_notify(struct hbg_priv *priv, + enum hbg_hw_event_type event_type) +{ + bool is_valid; + int ret; + + if (test_and_set_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state)) + return -EBUSY; + + /* notify */ + hbg_reg_write(priv, HBG_REG_EVENT_REQ_ADDR, event_type); + + ret = read_poll_timeout(hbg_hw_spec_is_valid, is_valid, is_valid, + HBG_HW_EVENT_WAIT_INTERVAL_US, + HBG_HW_EVENT_WAIT_TIMEOUT_US, + HBG_HW_EVENT_WAIT_INTERVAL_US, priv); + + clear_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state); + + if (ret) + dev_err(&priv->pdev->dev, + "event %d wait timeout\n", event_type); + + return ret; +} + +static int hbg_hw_dev_specs_init(struct hbg_priv *priv) +{ + struct hbg_dev_specs *specs = &priv->dev_specs; + u64 mac_addr; + + if (!hbg_hw_spec_is_valid(priv)) { + dev_err(&priv->pdev->dev, "dev_specs not init\n"); + return -EINVAL; + } + + specs->mac_id = hbg_reg_read(priv, HBG_REG_MAC_ID_ADDR); + specs->phy_addr = hbg_reg_read(priv, HBG_REG_PHY_ID_ADDR); + specs->mdio_frequency = hbg_reg_read(priv, HBG_REG_MDIO_FREQ_ADDR); + specs->max_mtu = hbg_reg_read(priv, HBG_REG_MAX_MTU_ADDR); + specs->min_mtu = hbg_reg_read(priv, HBG_REG_MIN_MTU_ADDR); + specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR); + specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR); + specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR); + specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR); + + mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR); + u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data); + + if (!is_valid_ether_addr((u8 *)specs->mac_addr.sa_data)) + return -EADDRNOTAVAIL; + + specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu; + specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len; + return 0; +} + +u32 hbg_hw_get_irq_status(struct hbg_priv *priv) +{ + u32 status; + + 
status = hbg_reg_read(priv, HBG_REG_CF_INTRPT_STAT_ADDR); + + hbg_field_modify(status, HBG_INT_MSK_TX_B, + hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_STAT_ADDR)); + hbg_field_modify(status, HBG_INT_MSK_RX_B, + hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_STAT_ADDR)); + + return status; +} + +void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask) +{ + if (FIELD_GET(HBG_INT_MSK_TX_B, mask)) + return hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_CLR_ADDR, 0x1); + + if (FIELD_GET(HBG_INT_MSK_RX_B, mask)) + return hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_CLR_ADDR, 0x1); + + return hbg_reg_write(priv, HBG_REG_CF_INTRPT_CLR_ADDR, mask); +} + +bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask) +{ + if (FIELD_GET(HBG_INT_MSK_TX_B, mask)) + return hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR); + + if (FIELD_GET(HBG_INT_MSK_RX_B, mask)) + return hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR); + + return hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR) & mask; +} + +void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable) +{ + u32 value; + + if (FIELD_GET(HBG_INT_MSK_TX_B, mask)) + return hbg_reg_write(priv, + HBG_REG_CF_IND_TXINT_MSK_ADDR, enable); + + if (FIELD_GET(HBG_INT_MSK_RX_B, mask)) + return hbg_reg_write(priv, + HBG_REG_CF_IND_RXINT_MSK_ADDR, enable); + + value = hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR); + if (enable) + value |= mask; + else + value &= ~mask; + + hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value); +} + +void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index) +{ + u32 addr; + + /* mac addr is u64, so the addr offset is 0x8 */ + addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8); + hbg_reg_write64(priv, addr, mac_addr); +} + +static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv, + u16 max_frame_len) +{ + max_frame_len = max_t(u32, max_frame_len, ETH_DATA_LEN); + + /* lower two bits of value must be set to 0 */ + max_frame_len = round_up(max_frame_len, HBG_PCU_FRAME_LEN_PLUS); + + hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_LEN_ADDR, + HBG_REG_MAX_FRAME_LEN_M, max_frame_len); +} + +static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv, + u16 max_frame_size) +{ + hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_SIZE_ADDR, + HBG_REG_MAX_FRAME_LEN_M, max_frame_size); +} + +void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu) +{ + u32 frame_len; + + frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers + + ETH_HLEN + ETH_FCS_LEN; + + hbg_hw_set_pcu_max_frame_len(priv, frame_len); + hbg_hw_set_mac_max_frame_len(priv, frame_len); +} + +void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable) +{ + hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR, + HBG_REG_PORT_ENABLE_TX_B, enable); + hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR, + HBG_REG_PORT_ENABLE_RX_B, enable); +} + +u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir) +{ + if (dir & HBG_DIR_TX) + return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR, + HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M); + + if (dir & HBG_DIR_RX) + return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR, + HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M); + + return 0; +} + +void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc) +{ + hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_0_ADDR, tx_desc->word0); + hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_1_ADDR, tx_desc->word1); + hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_2_ADDR, tx_desc->word2); + hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3); +} + +void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 
buffer_dma_addr) +{ + hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr); +} + +void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex) +{ + hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR, + HBG_REG_PORT_MODE_M, speed); + hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR, + HBG_REG_DUPLEX_B, duplex); +} + +/* only support uc filter */ +void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable) +{ + hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR, + HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable); +} + +void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en) +{ + hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR, + HBG_REG_PAUSE_ENABLE_TX_B, tx_en); + hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR, + HBG_REG_PAUSE_ENABLE_RX_B, rx_en); +} + +void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en) +{ + *tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR, + HBG_REG_PAUSE_ENABLE_TX_B); + *rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR, + HBG_REG_PAUSE_ENABLE_RX_B); +} + +void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr) +{ + hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr); +} + +static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv) +{ + u32 ctrl = 0; + + ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_AN_EN_B, HBG_STATUS_ENABLE); + ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_CRC_ADD_B, HBG_STATUS_ENABLE); + ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_PAD_EN_B, HBG_STATUS_ENABLE); + + hbg_reg_write(priv, HBG_REG_TRANSMIT_CTRL_ADDR, ctrl); +} + +static void hbg_hw_init_rx_ctrl(struct hbg_priv *priv) +{ + u32 ctrl = 0; + + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B, + HBG_STATUS_ENABLE); + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_TIME_INF_EN_B, HBG_STATUS_DISABLE); + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M, HBG_RX_SKIP1); + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M, + HBG_RX_SKIP2); + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_ALIGN_NUM_M, NET_IP_ALIGN); + ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_PORT_NUM, priv->dev_specs.mac_id); + + hbg_reg_write(priv, HBG_REG_RX_CTRL_ADDR, ctrl); +} + +static void hbg_hw_init_rx_control(struct hbg_priv *priv) +{ + hbg_hw_init_rx_ctrl(priv); + + /* parse from L2 layer */ + hbg_reg_write_field(priv, HBG_REG_RX_PKT_MODE_ADDR, + HBG_REG_RX_PKT_MODE_PARSE_MODE_M, 0x1); + + hbg_reg_write_field(priv, HBG_REG_RECV_CTRL_ADDR, + HBG_REG_RECV_CTRL_STRIP_PAD_EN_B, + HBG_STATUS_ENABLE); + hbg_reg_write_field(priv, HBG_REG_RX_BUF_SIZE_ADDR, + HBG_REG_RX_BUF_SIZE_M, priv->dev_specs.rx_buf_size); + hbg_reg_write_field(priv, HBG_REG_CF_CRC_STRIP_ADDR, + HBG_REG_CF_CRC_STRIP_B, HBG_STATUS_DISABLE); +} + +int hbg_hw_init(struct hbg_priv *priv) +{ + int ret; + + ret = hbg_hw_dev_specs_init(priv); + if (ret) + return ret; + + hbg_reg_write_field(priv, HBG_REG_BUS_CTRL_ADDR, + HBG_REG_BUS_CTRL_ENDIAN_M, + HBG_ENDIAN_CTRL_LE_DATA_BE); + hbg_reg_write_field(priv, HBG_REG_MODE_CHANGE_EN_ADDR, + HBG_REG_MODE_CHANGE_EN_B, HBG_STATUS_ENABLE); + + hbg_hw_init_rx_control(priv); + hbg_hw_init_transmit_ctrl(priv); + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h new file mode 100644 index 000000000000..a4a049b5121d --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. 
*/ + +#ifndef __HBG_HW_H +#define __HBG_HW_H + +#include <linux/bitfield.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +static inline u32 hbg_reg_read(struct hbg_priv *priv, u32 addr) +{ + return readl(priv->io_base + addr); +} + +static inline void hbg_reg_write(struct hbg_priv *priv, u32 addr, u32 value) +{ + writel(value, priv->io_base + addr); +} + +static inline u64 hbg_reg_read64(struct hbg_priv *priv, u32 addr) +{ + return lo_hi_readq(priv->io_base + addr); +} + +static inline void hbg_reg_write64(struct hbg_priv *priv, u32 addr, u64 value) +{ + lo_hi_writeq(value, priv->io_base + addr); +} + +#define hbg_reg_read_field(priv, addr, mask) \ + FIELD_GET(mask, hbg_reg_read(priv, addr)) + +#define hbg_field_modify(reg_value, mask, value) ({ \ + (reg_value) &= ~(mask); \ + (reg_value) |= FIELD_PREP(mask, value); }) + +#define hbg_reg_write_field(priv, addr, mask, val) ({ \ + typeof(priv) _priv = (priv); \ + typeof(addr) _addr = (addr); \ + u32 _value = hbg_reg_read(_priv, _addr); \ + hbg_field_modify(_value, mask, val); \ + hbg_reg_write(_priv, _addr, _value); }) + +int hbg_hw_event_notify(struct hbg_priv *priv, + enum hbg_hw_event_type event_type); +int hbg_hw_init(struct hbg_priv *priv); +void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex); +u32 hbg_hw_get_irq_status(struct hbg_priv *priv); +void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask); +bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask); +void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable); +void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu); +void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable); +void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index); +u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir); +void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc); +void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr); +void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable); +void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en); +void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en); +void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c new file mode 100644 index 000000000000..25dd25f096fe --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. 
+ +#include <linux/interrupt.h> +#include "hbg_irq.h" +#include "hbg_hw.h" + +static void hbg_irq_handle_err(struct hbg_priv *priv, + struct hbg_irq_info *irq_info) +{ + if (irq_info->need_print) + dev_err(&priv->pdev->dev, + "receive error interrupt: %s\n", irq_info->name); +} + +static void hbg_irq_handle_tx(struct hbg_priv *priv, + struct hbg_irq_info *irq_info) +{ + napi_schedule(&priv->tx_ring.napi); +} + +static void hbg_irq_handle_rx(struct hbg_priv *priv, + struct hbg_irq_info *irq_info) +{ + napi_schedule(&priv->rx_ring.napi); +} + +#define HBG_TXRX_IRQ_I(name, handle) \ + {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle} +#define HBG_ERR_IRQ_I(name, need_print) \ + {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err} + +static struct hbg_irq_info hbg_irqs[] = { + HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx), + HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx), + HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true), + HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true), + HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true), + HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true), + HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true), + HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true), + HBG_ERR_IRQ_I(TX_AHB_ERR, true), + HBG_ERR_IRQ_I(RX_BUF_AVL, false), + HBG_ERR_IRQ_I(REL_BUF_ERR, true), + HBG_ERR_IRQ_I(TXCFG_AVL, false), + HBG_ERR_IRQ_I(TX_DROP, false), + HBG_ERR_IRQ_I(RX_DROP, false), + HBG_ERR_IRQ_I(RX_AHB_ERR, true), + HBG_ERR_IRQ_I(MAC_FIFO_ERR, false), + HBG_ERR_IRQ_I(RBREQ_ERR, false), + HBG_ERR_IRQ_I(WE_ERR, false), +}; + +static irqreturn_t hbg_irq_handle(int irq_num, void *p) +{ + struct hbg_irq_info *info; + struct hbg_priv *priv = p; + u32 status; + u32 i; + + status = hbg_hw_get_irq_status(priv); + for (i = 0; i < priv->vectors.info_array_len; i++) { + info = &priv->vectors.info_array[i]; + if (status & info->mask) { + if (!hbg_hw_irq_is_enabled(priv, info->mask)) + continue; + + hbg_hw_irq_enable(priv, info->mask, false); + hbg_hw_irq_clear(priv, info->mask); + + info->count++; + if (info->irq_handle) + info->irq_handle(priv, info); + + if (info->re_enable) + hbg_hw_irq_enable(priv, info->mask, true); + } + } + + return IRQ_HANDLED; +} + +static const char *irq_names_map[HBG_VECTOR_NUM] = { "tx", "rx", + "err", "mdio" }; + +int hbg_irq_init(struct hbg_priv *priv) +{ + struct hbg_vector *vectors = &priv->vectors; + struct device *dev = &priv->pdev->dev; + int ret, id; + u32 i; + + /* used pcim_enable_device(), so the vectors become device managed */ + ret = pci_alloc_irq_vectors(priv->pdev, HBG_VECTOR_NUM, HBG_VECTOR_NUM, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to allocate vectors\n"); + + if (ret != HBG_VECTOR_NUM) + return dev_err_probe(dev, -EINVAL, + "requested %u MSI, but allocated %d MSI\n", + HBG_VECTOR_NUM, ret); + + /* mdio irq not requested, so the number of requested interrupts + * is HBG_VECTOR_NUM - 1. 
+ */ + for (i = 0; i < HBG_VECTOR_NUM - 1; i++) { + id = pci_irq_vector(priv->pdev, i); + if (id < 0) + return dev_err_probe(dev, id, "failed to get irq id\n"); + + snprintf(vectors->name[i], sizeof(vectors->name[i]), "%s-%s-%s", + dev_driver_string(dev), pci_name(priv->pdev), + irq_names_map[i]); + + ret = devm_request_irq(dev, id, hbg_irq_handle, 0, + vectors->name[i], priv); + if (ret) + return dev_err_probe(dev, ret, + "failed to request irq: %s\n", + irq_names_map[i]); + } + + vectors->info_array = hbg_irqs; + vectors->info_array_len = ARRAY_SIZE(hbg_irqs); + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h new file mode 100644 index 000000000000..5c5323cfc751 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_IRQ_H +#define __HBG_IRQ_H + +#include "hbg_common.h" + +int hbg_irq_init(struct hbg_priv *priv); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c new file mode 100644 index 000000000000..bb0f25ac9760 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -0,0 +1,408 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include "hbg_common.h" +#include "hbg_err.h" +#include "hbg_ethtool.h" +#include "hbg_hw.h" +#include "hbg_irq.h" +#include "hbg_mdio.h" +#include "hbg_txrx.h" +#include "hbg_debugfs.h" + +static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled) +{ + struct hbg_irq_info *info; + u32 i; + + for (i = 0; i < priv->vectors.info_array_len; i++) { + info = &priv->vectors.info_array[i]; + hbg_hw_irq_enable(priv, info->mask, enabled); + } +} + +static int hbg_net_open(struct net_device *netdev) +{ + struct hbg_priv *priv = netdev_priv(netdev); + int ret; + + ret = hbg_txrx_init(priv); + if (ret) + return ret; + + hbg_all_irq_enable(priv, true); + hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE); + netif_start_queue(netdev); + hbg_phy_start(priv); + + return 0; +} + +/* This function only can be called after hbg_txrx_uninit() */ +static int hbg_hw_txrx_clear(struct hbg_priv *priv) +{ + int ret; + + /* After ring buffers have been released, + * do a reset to release hw fifo rx ring buffer + */ + ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET); + if (ret) + return ret; + + /* After reset, regs need to be reconfigured */ + return hbg_rebuild(priv); +} + +static int hbg_net_stop(struct net_device *netdev) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + hbg_phy_stop(priv); + netif_stop_queue(netdev); + hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE); + hbg_all_irq_enable(priv, false); + hbg_txrx_uninit(priv); + return hbg_hw_txrx_clear(priv); +} + +static void hbg_update_promisc_mode(struct net_device *netdev, bool overflow) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + /* Only when not table_overflow, and netdev->flags not set IFF_PROMISC, + * The MAC filter will be enabled. + * Otherwise the filter will be disabled. 
+ */ + priv->filter.enabled = !(overflow || (netdev->flags & IFF_PROMISC)); + hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled); +} + +static void hbg_set_mac_to_mac_table(struct hbg_priv *priv, + u32 index, const u8 *addr) +{ + if (addr) { + ether_addr_copy(priv->filter.mac_table[index].addr, addr); + hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index); + } else { + eth_zero_addr(priv->filter.mac_table[index].addr); + hbg_hw_set_uc_addr(priv, 0, index); + } +} + +static int hbg_get_index_from_mac_table(struct hbg_priv *priv, + const u8 *addr, u32 *index) +{ + u32 i; + + for (i = 0; i < priv->filter.table_max_len; i++) + if (ether_addr_equal(priv->filter.mac_table[i].addr, addr)) { + *index = i; + return 0; + } + + return -EINVAL; +} + +static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr) +{ + u32 index; + + /* already exists */ + if (!hbg_get_index_from_mac_table(priv, addr, &index)) + return 0; + + for (index = 0; index < priv->filter.table_max_len; index++) + if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) { + hbg_set_mac_to_mac_table(priv, index, addr); + return 0; + } + + return -ENOSPC; +} + +static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr) +{ + u32 index; + + /* not exists */ + if (hbg_get_index_from_mac_table(priv, addr, &index)) + return; + + hbg_set_mac_to_mac_table(priv, index, NULL); +} + +static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + return hbg_add_mac_to_filter(priv, addr); +} + +static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + if (ether_addr_equal(netdev->dev_addr, (u8 *)addr)) + return 0; + + hbg_del_mac_from_filter(priv, addr); + return 0; +} + +static void hbg_net_set_rx_mode(struct net_device *netdev) +{ + int ret; + + ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync); + + /* If ret != 0, overflow has occurred */ + hbg_update_promisc_mode(netdev, !!ret); +} + +static int hbg_net_set_mac_address(struct net_device *netdev, void *addr) +{ + struct hbg_priv *priv = netdev_priv(netdev); + u8 *mac_addr; + bool exists; + u32 index; + + mac_addr = ((struct sockaddr *)addr)->sa_data; + + if (!is_valid_ether_addr(mac_addr)) + return -EADDRNOTAVAIL; + + /* The index of host mac is always 0. + * If new mac address already exists, + * delete the existing mac address and + * add it to the position with index 0. 
+ */ + exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index); + hbg_set_mac_to_mac_table(priv, 0, mac_addr); + if (exists) + hbg_set_mac_to_mac_table(priv, index, NULL); + + hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr)); + dev_addr_set(netdev, mac_addr); + return 0; +} + +static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hbg_priv *priv = netdev_priv(netdev); + + if (netif_running(netdev)) + return -EBUSY; + + hbg_hw_set_mtu(priv, new_mtu); + WRITE_ONCE(netdev->mtu, new_mtu); + + dev_dbg(&priv->pdev->dev, + "change mtu from %u to %u\n", netdev->mtu, new_mtu); + + return 0; +} + +static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_ring *ring = &priv->tx_ring; + char *buf = ring->tout_log_buf; + u32 pos = 0; + + pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos, + "ring used num: %u, fifo used num: %u\n", + hbg_get_queue_used_num(ring), + hbg_hw_get_fifo_used_num(priv, HBG_DIR_TX)); + pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos, + "ntc: %u, ntu: %u, irq enabled: %u\n", + ring->ntc, ring->ntu, + hbg_hw_irq_is_enabled(priv, HBG_INT_MSK_TX_B)); + + netdev_info(netdev, "%s", buf); +} + +static const struct net_device_ops hbg_netdev_ops = { + .ndo_open = hbg_net_open, + .ndo_stop = hbg_net_stop, + .ndo_start_xmit = hbg_net_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = hbg_net_set_mac_address, + .ndo_change_mtu = hbg_net_change_mtu, + .ndo_tx_timeout = hbg_net_tx_timeout, + .ndo_set_rx_mode = hbg_net_set_rx_mode, +}; + +static int hbg_mac_filter_init(struct hbg_priv *priv) +{ + struct hbg_dev_specs *dev_specs = &priv->dev_specs; + struct hbg_mac_filter *filter = &priv->filter; + struct hbg_mac_table_entry *tmp_table; + + tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num, + sizeof(*tmp_table), GFP_KERNEL); + if (!tmp_table) + return -ENOMEM; + + filter->mac_table = tmp_table; + filter->table_max_len = dev_specs->uc_mac_num; + filter->enabled = true; + + hbg_hw_set_mac_filter_enable(priv, filter->enabled); + return 0; +} + +static void hbg_init_user_def(struct hbg_priv *priv) +{ + struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param; + + priv->mac.pause_autoneg = HBG_STATUS_ENABLE; + + pause_param->autoneg = priv->mac.pause_autoneg; + hbg_hw_get_pause_enable(priv, &pause_param->tx_pause, + &pause_param->rx_pause); +} + +static int hbg_init(struct hbg_priv *priv) +{ + int ret; + + ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_INIT); + if (ret) + return ret; + + ret = hbg_hw_init(priv); + if (ret) + return ret; + + ret = hbg_irq_init(priv); + if (ret) + return ret; + + ret = hbg_mdio_init(priv); + if (ret) + return ret; + + ret = hbg_mac_filter_init(priv); + if (ret) + return ret; + + hbg_debugfs_init(priv); + hbg_init_user_def(priv); + return 0; +} + +static int hbg_pci_init(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct hbg_priv *priv = netdev_priv(netdev); + struct device *dev = &pdev->dev; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return dev_err_probe(dev, ret, "failed to enable PCI device\n"); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + return dev_err_probe(dev, ret, "failed to set PCI DMA mask\n"); + + ret = pcim_iomap_regions(pdev, BIT(0), dev_driver_string(dev)); + if (ret) + return dev_err_probe(dev, ret, "failed to map PCI bar space\n"); + + priv->io_base = 
pcim_iomap_table(pdev)[0]; + if (!priv->io_base) + return dev_err_probe(dev, -ENOMEM, "failed to get io base\n"); + + pci_set_master(pdev); + return 0; +} + +static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct hbg_priv *priv; + int ret; + + netdev = devm_alloc_etherdev(dev, sizeof(struct hbg_priv)); + if (!netdev) + return -ENOMEM; + + pci_set_drvdata(pdev, netdev); + SET_NETDEV_DEV(netdev, dev); + + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->pdev = pdev; + + ret = hbg_pci_init(pdev); + if (ret) + return ret; + + ret = hbg_init(priv); + if (ret) + return ret; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; + netdev->max_mtu = priv->dev_specs.max_mtu; + netdev->min_mtu = priv->dev_specs.min_mtu; + netdev->netdev_ops = &hbg_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + + hbg_hw_set_mtu(priv, ETH_DATA_LEN); + hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr); + hbg_ethtool_set_ops(netdev); + + ret = devm_register_netdev(dev, netdev); + if (ret) + return dev_err_probe(dev, ret, "failed to register netdev\n"); + + netif_carrier_off(netdev); + return 0; +} + +static const struct pci_device_id hbg_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, 0x3730), 0}, + { } +}; +MODULE_DEVICE_TABLE(pci, hbg_pci_tbl); + +static struct pci_driver hbg_driver = { + .name = "hibmcge", + .id_table = hbg_pci_tbl, + .probe = hbg_probe, +}; + +static int __init hbg_module_init(void) +{ + int ret; + + hbg_debugfs_register(); + hbg_set_pci_err_handler(&hbg_driver); + ret = pci_register_driver(&hbg_driver); + if (ret) + hbg_debugfs_unregister(); + + return ret; +} +module_init(hbg_module_init); + +static void __exit hbg_module_exit(void) +{ + pci_unregister_driver(&hbg_driver); + hbg_debugfs_unregister(); +} +module_exit(hbg_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_DESCRIPTION("hibmcge driver"); +MODULE_VERSION("1.0"); diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c new file mode 100644 index 000000000000..db6bc4cfb971 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. 
+ +#include <linux/phy.h> +#include "hbg_common.h" +#include "hbg_hw.h" +#include "hbg_mdio.h" +#include "hbg_reg.h" + +#define HBG_MAC_GET_PRIV(mac) ((struct hbg_priv *)(mac)->mdio_bus->priv) +#define HBG_MII_BUS_GET_MAC(bus) (&((struct hbg_priv *)(bus)->priv)->mac) + +#define HBG_MDIO_C22_MODE 0x1 +#define HBG_MDIO_C22_REG_WRITE 0x1 +#define HBG_MDIO_C22_REG_READ 0x2 + +#define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000) +#define HBG_MDIO_OP_INTERVAL_US (5 * 1000) + +static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd) +{ + hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd); +} + +static void hbg_mdio_get_command(struct hbg_mac *mac, u32 *cmd) +{ + *cmd = hbg_reg_read(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR); +} + +static void hbg_mdio_set_wdata_reg(struct hbg_mac *mac, u16 wdata_value) +{ + hbg_reg_write_field(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_WDATA_ADDR, + HBG_REG_MDIO_WDATA_M, wdata_value); +} + +static u32 hbg_mdio_get_rdata_reg(struct hbg_mac *mac) +{ + return hbg_reg_read_field(HBG_MAC_GET_PRIV(mac), + HBG_REG_MDIO_RDATA_ADDR, + HBG_REG_MDIO_WDATA_M); +} + +static int hbg_mdio_wait_ready(struct hbg_mac *mac) +{ + struct hbg_priv *priv = HBG_MAC_GET_PRIV(mac); + u32 cmd = 0; + int ret; + + ret = readl_poll_timeout(priv->io_base + HBG_REG_MDIO_COMMAND_ADDR, cmd, + !FIELD_GET(HBG_REG_MDIO_COMMAND_START_B, cmd), + HBG_MDIO_OP_INTERVAL_US, + HBG_MDIO_OP_TIMEOUT_US); + + return ret ? -ETIMEDOUT : 0; +} + +static int hbg_mdio_cmd_send(struct hbg_mac *mac, u32 prt_addr, u32 dev_addr, + u32 type, u32 op_code) +{ + u32 cmd = 0; + + hbg_mdio_get_command(mac, &cmd); + hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_ST_M, type); + hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_OP_M, op_code); + hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_PRTAD_M, prt_addr); + hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_DEVAD_M, dev_addr); + + /* if auto scan enabled, this value need fix to 0 */ + hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_START_B, 0x1); + + hbg_mdio_set_command(mac, cmd); + + /* wait operation complete and check the result */ + return hbg_mdio_wait_ready(mac); +} + +static int hbg_mdio_read22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct hbg_mac *mac = HBG_MII_BUS_GET_MAC(bus); + int ret; + + ret = hbg_mdio_cmd_send(mac, phy_addr, regnum, HBG_MDIO_C22_MODE, + HBG_MDIO_C22_REG_READ); + if (ret) + return ret; + + return hbg_mdio_get_rdata_reg(mac); +} + +static int hbg_mdio_write22(struct mii_bus *bus, int phy_addr, int regnum, + u16 val) +{ + struct hbg_mac *mac = HBG_MII_BUS_GET_MAC(bus); + + hbg_mdio_set_wdata_reg(mac, val); + return hbg_mdio_cmd_send(mac, phy_addr, regnum, HBG_MDIO_C22_MODE, + HBG_MDIO_C22_REG_WRITE); +} + +static void hbg_mdio_init_hw(struct hbg_priv *priv) +{ + u32 freq = priv->dev_specs.mdio_frequency; + struct hbg_mac *mac = &priv->mac; + u32 cmd = 0; + + cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_ST_M, HBG_MDIO_C22_MODE); + cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_AUTO_SCAN_B, HBG_STATUS_DISABLE); + + /* freq use two bits, which are stored in clk_sel and clk_sel_exp */ + cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_CLK_SEL_B, freq & 0x1); + cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_CLK_SEL_EXP_B, + (freq >> 1) & 0x1); + + hbg_mdio_set_command(mac, cmd); +} + +static void hbg_flowctrl_cfg(struct hbg_priv *priv) +{ + struct phy_device *phydev = priv->mac.phydev; + bool rx_pause; + bool tx_pause; + + if (!priv->mac.pause_autoneg) + return; + + phy_get_pause(phydev, &tx_pause, &rx_pause); + hbg_hw_set_pause_enable(priv, tx_pause, rx_pause); +} + 
+static void hbg_phy_adjust_link(struct net_device *netdev) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + u32 speed; + + if (phydev->link != priv->mac.link_status) { + if (phydev->link) { + switch (phydev->speed) { + case SPEED_10: + speed = HBG_PORT_MODE_SGMII_10M; + break; + case SPEED_100: + speed = HBG_PORT_MODE_SGMII_100M; + break; + case SPEED_1000: + speed = HBG_PORT_MODE_SGMII_1000M; + break; + default: + return; + } + + priv->mac.speed = speed; + priv->mac.duplex = phydev->duplex; + priv->mac.autoneg = phydev->autoneg; + hbg_hw_adjust_link(priv, speed, phydev->duplex); + hbg_flowctrl_cfg(priv); + } + + priv->mac.link_status = phydev->link; + phy_print_status(phydev); + } +} + +static void hbg_phy_disconnect(void *data) +{ + phy_disconnect((struct phy_device *)data); +} + +static int hbg_phy_connect(struct hbg_priv *priv) +{ + struct phy_device *phydev = priv->mac.phydev; + struct device *dev = &priv->pdev->dev; + int ret; + + ret = phy_connect_direct(priv->netdev, phydev, hbg_phy_adjust_link, + PHY_INTERFACE_MODE_SGMII); + if (ret) + return dev_err_probe(dev, ret, "failed to connect phy\n"); + + ret = devm_add_action_or_reset(dev, hbg_phy_disconnect, phydev); + if (ret) + return ret; + + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_support_asym_pause(phydev); + phy_attached_info(phydev); + + return 0; +} + +void hbg_phy_start(struct hbg_priv *priv) +{ + phy_start(priv->mac.phydev); +} + +void hbg_phy_stop(struct hbg_priv *priv) +{ + phy_stop(priv->mac.phydev); +} + +int hbg_mdio_init(struct hbg_priv *priv) +{ + struct device *dev = &priv->pdev->dev; + struct hbg_mac *mac = &priv->mac; + struct phy_device *phydev; + struct mii_bus *mdio_bus; + int ret; + + mac->phy_addr = priv->dev_specs.phy_addr; + mdio_bus = devm_mdiobus_alloc(dev); + if (!mdio_bus) + return dev_err_probe(dev, -ENOMEM, + "failed to alloc MDIO bus\n"); + + mdio_bus->parent = dev; + mdio_bus->priv = priv; + mdio_bus->phy_mask = ~(1 << mac->phy_addr); + mdio_bus->name = "hibmcge mii bus"; + mac->mdio_bus = mdio_bus; + + mdio_bus->read = hbg_mdio_read22; + mdio_bus->write = hbg_mdio_write22; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii", dev_name(dev)); + + ret = devm_mdiobus_register(dev, mdio_bus); + if (ret) + return dev_err_probe(dev, ret, "failed to register MDIO bus\n"); + + phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr); + if (!phydev) + return dev_err_probe(dev, -ENODEV, + "failed to get phy device\n"); + + mac->phydev = phydev; + hbg_mdio_init_hw(priv); + return hbg_phy_connect(priv); +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h new file mode 100644 index 000000000000..febd02a309c7 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_MDIO_H +#define __HBG_MDIO_H + +#include "hbg_common.h" + +int hbg_mdio_init(struct hbg_priv *priv); +void hbg_phy_start(struct hbg_priv *priv); +void hbg_phy_stop(struct hbg_priv *priv); +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h new file mode 100644 index 000000000000..f12efc12f3c5 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. 
*/ + +#ifndef __HBG_REG_H +#define __HBG_REG_H + +/* DEV SPEC */ +#define HBG_REG_SPEC_VALID_ADDR 0x0000 +#define HBG_REG_EVENT_REQ_ADDR 0x0004 +#define HBG_REG_MAC_ID_ADDR 0x0008 +#define HBG_REG_PHY_ID_ADDR 0x000C +#define HBG_REG_MAC_ADDR_ADDR 0x0010 +#define HBG_REG_MAC_ADDR_HIGH_ADDR 0x0014 +#define HBG_REG_UC_MAC_NUM_ADDR 0x0018 +#define HBG_REG_MDIO_FREQ_ADDR 0x0024 +#define HBG_REG_MAX_MTU_ADDR 0x0028 +#define HBG_REG_MIN_MTU_ADDR 0x002C +#define HBG_REG_TX_FIFO_NUM_ADDR 0x0030 +#define HBG_REG_RX_FIFO_NUM_ADDR 0x0034 +#define HBG_REG_VLAN_LAYERS_ADDR 0x0038 + +/* MDIO */ +#define HBG_REG_MDIO_BASE 0x8000 +#define HBG_REG_MDIO_COMMAND_ADDR (HBG_REG_MDIO_BASE + 0x0000) +#define HBG_REG_MDIO_COMMAND_CLK_SEL_EXP_B BIT(17) +#define HBG_REG_MDIO_COMMAND_AUTO_SCAN_B BIT(16) +#define HBG_REG_MDIO_COMMAND_CLK_SEL_B BIT(15) +#define HBG_REG_MDIO_COMMAND_START_B BIT(14) +#define HBG_REG_MDIO_COMMAND_ST_M GENMASK(13, 12) +#define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10) +#define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5) +#define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0) +#define HBG_REG_MDIO_ADDR_ADDR (HBG_REG_MDIO_BASE + 0x0004) +#define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008) +#define HBG_REG_MDIO_WDATA_M GENMASK(15, 0) +#define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C) +#define HBG_REG_MDIO_STA_ADDR (HBG_REG_MDIO_BASE + 0x0010) + +/* GMAC */ +#define HBG_REG_SGMII_BASE 0x10000 +#define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008) +#define HBG_REG_FD_FC_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x000C) +#define HBG_REG_FC_TX_TIMER_ADDR (HBG_REG_SGMII_BASE + 0x001C) +#define HBG_REG_FD_FC_ADDR_LOW_ADDR (HBG_REG_SGMII_BASE + 0x0020) +#define HBG_REG_FD_FC_ADDR_HIGH_ADDR (HBG_REG_SGMII_BASE + 0x0024) +#define HBG_REG_DUPLEX_B BIT(0) +#define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C) +#define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040) +#define HBG_REG_PORT_MODE_M GENMASK(3, 0) +#define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044) +#define HBG_REG_PORT_ENABLE_RX_B BIT(1) +#define HBG_REG_PORT_ENABLE_TX_B BIT(2) +#define HBG_REG_PAUSE_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0048) +#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0) +#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1) +#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058) +#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060) +#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7) +#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6) +#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5) +#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064) +#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0) +#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8) +#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0) +#define HBG_REG_CF_CRC_STRIP_B BIT(0) +#define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4) +#define HBG_REG_MODE_CHANGE_EN_B BIT(0) +#define HBG_REG_LOOP_REG_ADDR (HBG_REG_SGMII_BASE + 0x01DC) +#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0) +#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3) +#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8) +#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200) +#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204) +#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208) +#define HBG_REG_STATION_ADDR_HIGH_1_ADDR (HBG_REG_SGMII_BASE + 0x020C) +#define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210) +#define HBG_REG_STATION_ADDR_HIGH_2_ADDR 
(HBG_REG_SGMII_BASE + 0x0214) +#define HBG_REG_STATION_ADDR_LOW_3_ADDR (HBG_REG_SGMII_BASE + 0x0218) +#define HBG_REG_STATION_ADDR_HIGH_3_ADDR (HBG_REG_SGMII_BASE + 0x021C) +#define HBG_REG_STATION_ADDR_LOW_4_ADDR (HBG_REG_SGMII_BASE + 0x0220) +#define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224) +#define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228) +#define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C) + +/* PCU */ +#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420) +#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424) +#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428) +#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C) +#define HBG_INT_MSK_WE_ERR_B BIT(31) +#define HBG_INT_MSK_RBREQ_ERR_B BIT(30) +#define HBG_INT_MSK_MAC_FIFO_ERR_B BIT(29) +#define HBG_INT_MSK_RX_AHB_ERR_B BIT(28) +#define HBG_INT_MSK_RX_DROP_B BIT(26) +#define HBG_INT_MSK_TX_DROP_B BIT(25) +#define HBG_INT_MSK_TXCFG_AVL_B BIT(24) +#define HBG_INT_MSK_REL_BUF_ERR_B BIT(23) +#define HBG_INT_MSK_RX_BUF_AVL_B BIT(22) +#define HBG_INT_MSK_TX_AHB_ERR_B BIT(21) +#define HBG_INT_MSK_SRAM_PARITY_ERR_B BIT(20) +#define HBG_INT_MSK_MAC_APP_TX_FIFO_ERR_B BIT(19) +#define HBG_INT_MSK_MAC_APP_RX_FIFO_ERR_B BIT(18) +#define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17) +#define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16) +#define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15) +#define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */ +#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */ +#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434) +#define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438) +#define HBG_REG_TX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x043C) +#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440) +#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444) +#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0) +#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450) +#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454) +#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458) +#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C) +#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0) +#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16) +#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470) +#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488) +#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C) +#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490) +#define HBG_REG_TX_CFF_ADDR_3_ADDR (HBG_REG_SGMII_BASE + 0x0494) +#define HBG_REG_RX_CFF_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x04A0) +#define HBG_REG_RX_BUF_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x04E4) +#define HBG_REG_RX_BUF_SIZE_M GENMASK(15, 0) +#define HBG_REG_BUS_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04E8) +#define HBG_REG_BUS_CTRL_ENDIAN_M GENMASK(2, 1) +#define HBG_REG_RX_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04F0) +#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M GENMASK(31, 28) +#define HBG_REG_RX_CTRL_TIME_INF_EN_B BIT(23) +#define HBG_REG_RX_CTRL_RX_ALIGN_NUM_M GENMASK(18, 17) +#define HBG_REG_RX_CTRL_PORT_NUM GENMASK(16, 13) +#define HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B BIT(12) +#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0) +#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4) +#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21) +#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4) +#define HBG_REG_DBG_ST1_ADDR 
(HBG_REG_SGMII_BASE + 0x05E8) +#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC) +#define HBG_REG_BUS_RST_EN_ADDR (HBG_REG_SGMII_BASE + 0x0688) +#define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694) +#define HBG_REG_IND_INTR_MASK_B BIT(0) +#define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698) +#define HBG_REG_CF_IND_TXINT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x069C) +#define HBG_REG_CF_IND_RXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x06a0) +#define HBG_REG_CF_IND_RXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x06a4) +#define HBG_REG_CF_IND_RXINT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x06a8) + +enum hbg_port_mode { + /* 0x0 ~ 0x5 are reserved */ + HBG_PORT_MODE_SGMII_10M = 0x6, + HBG_PORT_MODE_SGMII_100M = 0x7, + HBG_PORT_MODE_SGMII_1000M = 0x8, +}; + +struct hbg_tx_desc { + u32 word0; + u32 word1; + u32 word2; /* pkt_addr */ + u32 word3; /* clear_addr */ +}; + +#define HBG_TX_DESC_W0_IP_OFF_M GENMASK(30, 26) +#define HBG_TX_DESC_W0_l3_CS_B BIT(2) +#define HBG_TX_DESC_W0_WB_B BIT(1) +#define HBG_TX_DESC_W0_l4_CS_B BIT(0) +#define HBG_TX_DESC_W1_SEND_LEN_M GENMASK(19, 4) + +struct hbg_rx_desc { + u32 word0; + u32 word1; /* tag */ + u32 word2; + u32 word3; + u32 word4; + u32 word5; +}; + +#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16) + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c new file mode 100644 index 000000000000..f4f256a0dfea --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2024 Hisilicon Limited. + +#include <net/netdev_queues.h> +#include "hbg_common.h" +#include "hbg_irq.h" +#include "hbg_reg.h" +#include "hbg_txrx.h" + +#define netdev_get_tx_ring(netdev) \ + (&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring)) + +#define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? 
\ + DMA_FROM_DEVICE : DMA_TO_DEVICE) + +#define hbg_queue_used_num(head, tail, ring) ({ \ + typeof(ring) _ring = (ring); \ + ((tail) + _ring->len - (head)) % _ring->len; }) +#define hbg_queue_left_num(head, tail, ring) ({ \ + typeof(ring) _r = (ring); \ + _r->len - hbg_queue_used_num((head), (tail), _r) - 1; }) +#define hbg_queue_is_empty(head, tail, ring) \ + (hbg_queue_used_num((head), (tail), (ring)) == 0) +#define hbg_queue_is_full(head, tail, ring) \ + (hbg_queue_left_num((head), (tail), (ring)) == 0) +#define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len) +#define hbg_queue_move_next(p, ring) ({ \ + typeof(ring) _ring = (ring); \ + _ring->p = hbg_queue_next_prt(_ring->p, _ring); }) + +#define HBG_TX_STOP_THRS 2 +#define HBG_TX_START_THRS (2 * HBG_TX_STOP_THRS) + +static int hbg_dma_map(struct hbg_buffer *buffer) +{ + struct hbg_priv *priv = buffer->priv; + + buffer->skb_dma = dma_map_single(&priv->pdev->dev, + buffer->skb->data, buffer->skb_len, + buffer_to_dma_dir(buffer)); + if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) + return -ENOMEM; + + return 0; +} + +static void hbg_dma_unmap(struct hbg_buffer *buffer) +{ + struct hbg_priv *priv = buffer->priv; + + if (unlikely(!buffer->skb_dma)) + return; + + dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len, + buffer_to_dma_dir(buffer)); + buffer->skb_dma = 0; +} + +static void hbg_init_tx_desc(struct hbg_buffer *buffer, + struct hbg_tx_desc *tx_desc) +{ + u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header; + u32 word0 = 0; + + word0 |= FIELD_PREP(HBG_TX_DESC_W0_WB_B, HBG_STATUS_ENABLE); + word0 |= FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, ip_offset); + if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) { + word0 |= FIELD_PREP(HBG_TX_DESC_W0_l3_CS_B, HBG_STATUS_ENABLE); + word0 |= FIELD_PREP(HBG_TX_DESC_W0_l4_CS_B, HBG_STATUS_ENABLE); + } + + tx_desc->word0 = word0; + tx_desc->word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M, + buffer->skb->len); + tx_desc->word2 = buffer->skb_dma; + tx_desc->word3 = buffer->state_dma; +} + +netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct hbg_ring *ring = netdev_get_tx_ring(netdev); + struct hbg_priv *priv = netdev_priv(netdev); + /* This smp_load_acquire() pairs with smp_store_release() in + * hbg_napi_tx_recycle() called in tx interrupt handle process. + */ + u32 ntc = smp_load_acquire(&ring->ntc); + struct hbg_buffer *buffer; + struct hbg_tx_desc tx_desc; + u32 ntu = ring->ntu; + + if (unlikely(!skb->len || + skb->len > hbg_spec_max_frame_len(priv, HBG_DIR_TX))) { + dev_kfree_skb_any(skb); + netdev->stats.tx_errors++; + return NETDEV_TX_OK; + } + + if (!netif_subqueue_maybe_stop(netdev, 0, + hbg_queue_left_num(ntc, ntu, ring), + HBG_TX_STOP_THRS, HBG_TX_START_THRS)) + return NETDEV_TX_BUSY; + + buffer = &ring->queue[ntu]; + buffer->skb = skb; + buffer->skb_len = skb->len; + if (unlikely(hbg_dma_map(buffer))) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + buffer->state = HBG_TX_STATE_START; + hbg_init_tx_desc(buffer, &tx_desc); + hbg_hw_set_tx_desc(priv, &tx_desc); + + /* This smp_store_release() pairs with smp_load_acquire() in + * hbg_napi_tx_recycle() called in tx interrupt handle process. 
+ */ + smp_store_release(&ring->ntu, hbg_queue_next_prt(ntu, ring)); + dev_sw_netstats_tx_add(netdev, 1, skb->len); + return NETDEV_TX_OK; +} + +static void hbg_buffer_free_skb(struct hbg_buffer *buffer) +{ + if (unlikely(!buffer->skb)) + return; + + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; +} + +static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer) +{ + u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir); + struct hbg_priv *priv = buffer->priv; + + buffer->skb = netdev_alloc_skb(priv->netdev, len); + if (unlikely(!buffer->skb)) + return -ENOMEM; + + buffer->skb_len = len; + memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE); + return 0; +} + +static void hbg_buffer_free(struct hbg_buffer *buffer) +{ + hbg_dma_unmap(buffer); + hbg_buffer_free_skb(buffer); +} + +static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget) +{ + struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi); + /* This smp_load_acquire() pairs with smp_store_release() in + * hbg_net_start_xmit() called in xmit process. + */ + u32 ntu = smp_load_acquire(&ring->ntu); + struct hbg_priv *priv = ring->priv; + struct hbg_buffer *buffer; + u32 ntc = ring->ntc; + int packet_done = 0; + + /* We need do cleanup even if budget is 0. + * Per NAPI documentation budget is for Rx. + * So We hardcode the amount of work Tx NAPI does to 128. + */ + budget = 128; + while (packet_done < budget) { + if (unlikely(hbg_queue_is_empty(ntc, ntu, ring))) + break; + + /* make sure HW write desc complete */ + dma_rmb(); + + buffer = &ring->queue[ntc]; + if (buffer->state != HBG_TX_STATE_COMPLETE) + break; + + hbg_buffer_free(buffer); + ntc = hbg_queue_next_prt(ntc, ring); + packet_done++; + } + + /* This smp_store_release() pairs with smp_load_acquire() in + * hbg_net_start_xmit() called in xmit process. 
+ */ + smp_store_release(&ring->ntc, ntc); + netif_wake_queue(priv->netdev); + + if (likely(packet_done < budget && + napi_complete_done(napi, packet_done))) + hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true); + + return packet_done; +} + +static int hbg_rx_fill_one_buffer(struct hbg_priv *priv) +{ + struct hbg_ring *ring = &priv->rx_ring; + struct hbg_buffer *buffer; + int ret; + + if (hbg_queue_is_full(ring->ntc, ring->ntu, ring)) + return 0; + + buffer = &ring->queue[ring->ntu]; + ret = hbg_buffer_alloc_skb(buffer); + if (unlikely(ret)) + return ret; + + ret = hbg_dma_map(buffer); + if (unlikely(ret)) { + hbg_buffer_free_skb(buffer); + return ret; + } + + hbg_hw_fill_buffer(priv, buffer->skb_dma); + hbg_queue_move_next(ntu, ring); + return 0; +} + +static bool hbg_sync_data_from_hw(struct hbg_priv *priv, + struct hbg_buffer *buffer) +{ + struct hbg_rx_desc *rx_desc; + + /* make sure HW write desc complete */ + dma_rmb(); + + dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma, + buffer->skb_len, DMA_FROM_DEVICE); + + rx_desc = (struct hbg_rx_desc *)buffer->skb->data; + return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0; +} + +static int hbg_napi_rx_poll(struct napi_struct *napi, int budget) +{ + struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi); + struct hbg_priv *priv = ring->priv; + struct hbg_rx_desc *rx_desc; + struct hbg_buffer *buffer; + u32 packet_done = 0; + u32 pkt_len; + + while (packet_done < budget) { + if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring))) + break; + + buffer = &ring->queue[ring->ntc]; + if (unlikely(!buffer->skb)) + goto next_buffer; + + if (unlikely(!hbg_sync_data_from_hw(priv, buffer))) + break; + rx_desc = (struct hbg_rx_desc *)buffer->skb->data; + pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2); + + hbg_dma_unmap(buffer); + + skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN); + skb_put(buffer->skb, pkt_len); + buffer->skb->protocol = eth_type_trans(buffer->skb, + priv->netdev); + + dev_sw_netstats_rx_add(priv->netdev, pkt_len); + napi_gro_receive(napi, buffer->skb); + buffer->skb = NULL; + +next_buffer: + hbg_rx_fill_one_buffer(priv); + hbg_queue_move_next(ntc, ring); + packet_done++; + } + + if (likely(packet_done < budget && + napi_complete_done(napi, packet_done))) + hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true); + + return packet_done; +} + +static void hbg_ring_uninit(struct hbg_ring *ring) +{ + struct hbg_buffer *buffer; + u32 i; + + if (!ring->queue) + return; + + napi_disable(&ring->napi); + netif_napi_del(&ring->napi); + + for (i = 0; i < ring->len; i++) { + buffer = &ring->queue[i]; + hbg_buffer_free(buffer); + buffer->ring = NULL; + buffer->priv = NULL; + } + + dma_free_coherent(&ring->priv->pdev->dev, + ring->len * sizeof(*ring->queue), + ring->queue, ring->queue_dma); + ring->queue = NULL; + ring->queue_dma = 0; + ring->len = 0; + ring->priv = NULL; +} + +static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring, + int (*napi_poll)(struct napi_struct *, int), + enum hbg_dir dir) +{ + struct hbg_buffer *buffer; + u32 i, len; + + len = hbg_get_spec_fifo_max_num(priv, dir) + 1; + ring->queue = dma_alloc_coherent(&priv->pdev->dev, + len * sizeof(*ring->queue), + &ring->queue_dma, GFP_KERNEL); + if (!ring->queue) + return -ENOMEM; + + for (i = 0; i < len; i++) { + buffer = &ring->queue[i]; + buffer->skb_len = 0; + buffer->dir = dir; + buffer->ring = ring; + buffer->priv = priv; + buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer)); + } + + ring->dir = dir; 
+ ring->priv = priv; + ring->ntc = 0; + ring->ntu = 0; + ring->len = len; + + if (dir == HBG_DIR_TX) + netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll); + else + netif_napi_add(priv->netdev, &ring->napi, napi_poll); + + napi_enable(&ring->napi); + return 0; +} + +static int hbg_tx_ring_init(struct hbg_priv *priv) +{ + struct hbg_ring *tx_ring = &priv->tx_ring; + + if (!tx_ring->tout_log_buf) + tx_ring->tout_log_buf = devm_kmalloc(&priv->pdev->dev, + HBG_TX_TIMEOUT_BUF_LEN, + GFP_KERNEL); + + if (!tx_ring->tout_log_buf) + return -ENOMEM; + + return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX); +} + +static int hbg_rx_ring_init(struct hbg_priv *priv) +{ + int ret; + u32 i; + + ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX); + if (ret) + return ret; + + for (i = 0; i < priv->rx_ring.len - 1; i++) { + ret = hbg_rx_fill_one_buffer(priv); + if (ret) { + hbg_ring_uninit(&priv->rx_ring); + return ret; + } + } + + return 0; +} + +int hbg_txrx_init(struct hbg_priv *priv) +{ + int ret; + + ret = hbg_tx_ring_init(priv); + if (ret) { + dev_err(&priv->pdev->dev, + "failed to init tx ring, ret = %d\n", ret); + return ret; + } + + ret = hbg_rx_ring_init(priv); + if (ret) { + dev_err(&priv->pdev->dev, + "failed to init rx ring, ret = %d\n", ret); + hbg_ring_uninit(&priv->tx_ring); + } + + return ret; +} + +void hbg_txrx_uninit(struct hbg_priv *priv) +{ + hbg_ring_uninit(&priv->tx_ring); + hbg_ring_uninit(&priv->rx_ring); +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h new file mode 100644 index 000000000000..2883a5899ae2 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2024 Hisilicon Limited. */ + +#ifndef __HBG_TXRX_H +#define __HBG_TXRX_H + +#include <linux/etherdevice.h> +#include "hbg_hw.h" + +static inline u32 hbg_spec_max_frame_len(struct hbg_priv *priv, + enum hbg_dir dir) +{ + return (dir == HBG_DIR_TX) ? priv->dev_specs.max_frame_len : + priv->dev_specs.rx_buf_size; +} + +static inline u32 hbg_get_spec_fifo_max_num(struct hbg_priv *priv, + enum hbg_dir dir) +{ + return (dir == HBG_DIR_TX) ? priv->dev_specs.tx_fifo_num : + priv->dev_specs.rx_fifo_num; +} + +static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir) +{ + return hbg_hw_get_fifo_used_num(priv, dir) >= + hbg_get_spec_fifo_max_num(priv, dir); +} + +static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring) +{ + return (ring->ntu + ring->len - ring->ntc) % ring->len; +} + +netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev); +int hbg_txrx_init(struct hbg_priv *priv); +void hbg_txrx_uninit(struct hbg_priv *priv); + +#endif |
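Illustrative note (not part of the patch): the hbg_queue_used_num()/hbg_queue_left_num() macros in hbg_txrx.c implement standard circular-ring index math in which one slot is always left unused so that "full" and "empty" remain distinguishable. Below is a minimal standalone C sketch of that arithmetic; the names ring_used()/ring_left() are illustrative and do not exist in the driver.

/* Standalone illustration of the index math behind the hbg_queue_*
 * macros. One slot is kept as a guard so that a full ring
 * (left == 0) differs from an empty one (used == 0).
 */
#include <stdio.h>

static unsigned int ring_used(unsigned int head, unsigned int tail,
			      unsigned int len)
{
	return (tail + len - head) % len;	/* entries waiting to be consumed */
}

static unsigned int ring_left(unsigned int head, unsigned int tail,
			      unsigned int len)
{
	return len - ring_used(head, tail, len) - 1;	/* free slots minus the guard slot */
}

int main(void)
{
	unsigned int len = 8, head = 6, tail = 2;	/* tail has wrapped past the end */

	printf("used=%u left=%u\n",
	       ring_used(head, tail, len),		/* (2 + 8 - 6) % 8 = 4 */
	       ring_left(head, tail, len));		/* 8 - 4 - 1 = 3 */
	return 0;
}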
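Illustrative note (not part of the patch): hbg_init_tx_desc() packs the send length into TX descriptor word1 with FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M, ...) and hbg_napi_rx_poll() reads the packet length out of RX descriptor word2 with FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, ...). The user-space sketch below approximates that bitfield packing with plain shifts and masks; the SEND_LEN_*/PKT_LEN_* names are illustrative, and the driver itself uses <linux/bitfield.h>.

/* Approximation of FIELD_PREP()/FIELD_GET() for the two length fields:
 * HBG_TX_DESC_W1_SEND_LEN_M = GENMASK(19, 4)
 * HBG_RX_DESC_W2_PKT_LEN_M  = GENMASK(31, 16)
 */
#include <stdint.h>
#include <stdio.h>

#define SEND_LEN_SHIFT	4
#define SEND_LEN_MASK	(0xffffu << SEND_LEN_SHIFT)	/* bits 19..4 */
#define PKT_LEN_SHIFT	16
#define PKT_LEN_MASK	(0xffffu << PKT_LEN_SHIFT)	/* bits 31..16 */

int main(void)
{
	uint32_t word2 = 0x05ea0000;	/* pretend HW reported a 1514-byte frame */
	uint32_t skb_len = 1514;
	uint32_t word1;

	word1 = (skb_len << SEND_LEN_SHIFT) & SEND_LEN_MASK;	/* ~FIELD_PREP */
	printf("tx word1 = 0x%08x\n", word1);
	printf("rx pkt_len = %u\n",
	       (word2 & PKT_LEN_MASK) >> PKT_LEN_SHIFT);	/* ~FIELD_GET */
	return 0;
}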
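Illustrative note (not part of the patch): hbg_net_start_xmit() publishes a new TX descriptor by filling the slot first and only then advancing ring->ntu with smp_store_release(), while hbg_napi_tx_recycle() reads ring->ntu with smp_load_acquire() before touching the slots (and the mirror pairing applies to ntc). The sketch below models that ordering with C11 release/acquire atomics; the ring layout and names are illustrative, not driver code.

/* Producer/consumer ordering as used between xmit (advances ntu) and
 * tx recycle (advances ntc), modelled with C11 atomics in place of the
 * kernel's smp_store_release()/smp_load_acquire().
 */
#include <stdatomic.h>
#include <stdio.h>

#define RING_LEN 8

static unsigned int slots[RING_LEN];
static _Atomic unsigned int ntu;	/* next-to-use, written by the producer */

static void produce(unsigned int val)
{
	unsigned int tail = atomic_load_explicit(&ntu, memory_order_relaxed);

	slots[tail] = val;		/* fill the slot first ... */
	/* ... then publish it; pairs with the acquire load in consume() */
	atomic_store_explicit(&ntu, (tail + 1) % RING_LEN,
			      memory_order_release);
}

static void consume(unsigned int *ntc)
{
	unsigned int tail = atomic_load_explicit(&ntu, memory_order_acquire);

	while (*ntc != tail) {		/* everything up to tail is now visible */
		printf("got %u\n", slots[*ntc]);
		*ntc = (*ntc + 1) % RING_LEN;
	}
}

int main(void)
{
	unsigned int ntc = 0;

	produce(42);
	consume(&ntc);
	return 0;
}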