Diffstat (limited to 'drivers/net/ethernet/wangxun')
33 files changed, 5946 insertions, 513 deletions
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 85cdbdd44fec..e5fc942c28cc 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN config LIBWX tristate + depends on PTP_1588_CLOCK_OPTIONAL select PAGE_POOL help Common library for Wangxun(R) Ethernet drivers. @@ -25,6 +26,7 @@ config LIBWX config NGBE tristate "Wangxun(R) GbE PCI Express adapters support" depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL select LIBWX select PHYLINK help @@ -38,13 +40,13 @@ config NGBE will be called ngbe. config TXGBE - tristate "Wangxun(R) 10GbE PCI Express adapters support" + tristate "Wangxun(R) 10/25/40GbE PCI Express adapters support" depends on PCI depends on COMMON_CLK + depends on I2C_DESIGNWARE_PLATFORM + depends on PTP_1588_CLOCK_OPTIONAL select MARVELL_10G_PHY select REGMAP - select I2C - select I2C_DESIGNWARE_PLATFORM select PHYLINK select HWMON if TXGBE=y select SFP @@ -53,7 +55,7 @@ config TXGBE select PCS_XPCS select LIBWX help - This driver supports Wangxun(R) 10GbE PCI Express family of + This driver supports Wangxun(R) 10/25/40GbE PCI Express family of adapters. More specific information on configuring the driver is in diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile index 42ccd6e4052e..9b78b604a94e 100644 --- a/drivers/net/ethernet/wangxun/libwx/Makefile +++ b/drivers/net/ethernet/wangxun/libwx/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_LIBWX) += libwx.o -libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o +libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o wx_mbx.o wx_sriov.o diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index cc3bec42ed8e..c12a4cb951f6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -41,6 +41,14 @@ static const struct wx_stats wx_gstrings_stats[] = { WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good), WX_STAT("rx_csum_offload_errors", hw_csum_rx_error), WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +static const struct wx_stats wx_gstrings_fdir_stats[] = { + WX_STAT("fdir_match", stats.fdirmatch), + WX_STAT("fdir_miss", stats.fdirmiss), }; /* drivers allocates num_tx_queues and num_rx_queues symmetrically so @@ -55,13 +63,17 @@ static const struct wx_stats wx_gstrings_stats[] = { (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \ (sizeof(struct wx_queue_stats) / sizeof(u64))) #define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) +#define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats) #define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) int wx_get_sset_count(struct net_device *netdev, int sset) { + struct wx *wx = netdev_priv(netdev); + switch (sset) { case ETH_SS_STATS: - return WX_STATS_LEN; + return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ? 
+ WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN; default: return -EOPNOTSUPP; } @@ -70,6 +82,7 @@ EXPORT_SYMBOL(wx_get_sset_count); void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct wx *wx = netdev_priv(netdev); u8 *p = data; int i; @@ -77,6 +90,10 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) case ETH_SS_STATS: for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) ethtool_puts(&p, wx_gstrings_stats[i].stat_string); + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) { + for (i = 0; i < WX_FDIR_STATS_LEN; i++) + ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string); + } for (i = 0; i < netdev->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -96,7 +113,7 @@ void wx_get_ethtool_stats(struct net_device *netdev, struct wx *wx = netdev_priv(netdev); struct wx_ring *ring; unsigned int start; - int i, j; + int i, j, k; char *p; wx_update_stats(wx); @@ -107,6 +124,13 @@ void wx_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) { + for (k = 0; k < WX_FDIR_STATS_LEN; k++) { + p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset; + data[i++] = *(u64 *)p; + } + } + for (j = 0; j < netdev->num_tx_queues; j++) { ring = wx->tx_ring[j]; if (!ring) { @@ -172,17 +196,21 @@ EXPORT_SYMBOL(wx_get_pause_stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { + unsigned int stats_len = WX_STATS_LEN; struct wx *wx = netdev_priv(netdev); + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + stats_len += WX_FDIR_STATS_LEN; + strscpy(info->driver, wx->driver_name, sizeof(info->driver)); strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) { - info->n_stats = WX_STATS_LEN - + info->n_stats = stats_len - (WX_NUM_TX_QUEUES - wx->num_tx_queues) * (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2; } else { - info->n_stats = WX_STATS_LEN; + info->n_stats = stats_len; } } EXPORT_SYMBOL(wx_get_drvinfo); @@ -191,6 +219,9 @@ int wx_nway_reset(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_aml40) + return -EOPNOTSUPP; + return phylink_ethtool_nway_reset(wx->phylink); } EXPORT_SYMBOL(wx_nway_reset); @@ -209,6 +240,9 @@ int wx_set_link_ksettings(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_aml40) + return -EOPNOTSUPP; + return phylink_ethtool_ksettings_set(wx->phylink, cmd); } EXPORT_SYMBOL(wx_set_link_ksettings); @@ -218,6 +252,9 @@ void wx_get_pauseparam(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_aml40) + return; + phylink_ethtool_get_pauseparam(wx->phylink, pause); } EXPORT_SYMBOL(wx_get_pauseparam); @@ -227,6 +264,9 @@ int wx_set_pauseparam(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_aml40) + return -EOPNOTSUPP; + return phylink_ethtool_set_pauseparam(wx->phylink, pause); } EXPORT_SYMBOL(wx_set_pauseparam); @@ -297,10 +337,18 @@ int wx_set_coalesce(struct net_device *netdev, if (ec->tx_max_coalesced_frames_irq) wx->tx_work_limit = ec->tx_max_coalesced_frames_irq; - if (wx->mac.type == wx_mac_sp) + switch (wx->mac.type) { + case wx_mac_sp: max_eitr = WX_SP_MAX_EITR; - else + break; + case wx_mac_aml: + case wx_mac_aml40: + max_eitr = WX_AML_MAX_EITR; + break; + default: 
max_eitr = WX_EM_MAX_EITR; + break; + } if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) || (ec->tx_coalesce_usecs > (max_eitr >> 2))) @@ -322,10 +370,16 @@ int wx_set_coalesce(struct net_device *netdev, wx->tx_itr_setting = ec->tx_coalesce_usecs; if (wx->tx_itr_setting == 1) { - if (wx->mac.type == wx_mac_sp) + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: + case wx_mac_aml40: tx_itr_param = WX_12K_ITR; - else + break; + default: tx_itr_param = WX_20K_ITR; + break; + } } else { tx_itr_param = wx->tx_itr_setting; } @@ -358,7 +412,7 @@ static unsigned int wx_max_channels(struct wx *wx) max_combined = 1; } else { /* support up to max allowed queues with RSS */ - if (wx->mac.type == wx_mac_sp) + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) max_combined = 63; else max_combined = 8; @@ -383,6 +437,9 @@ void wx_get_channels(struct net_device *dev, /* record RSS queues */ ch->combined_count = wx->ring_feature[RING_F_RSS].indices; + + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + ch->combined_count = wx->ring_feature[RING_F_FDIR].indices; } EXPORT_SYMBOL(wx_get_channels); @@ -400,6 +457,9 @@ int wx_set_channels(struct net_device *dev, if (count > wx_max_channels(wx)) return -EINVAL; + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + wx->ring_feature[RING_F_FDIR].limit = count; + wx->ring_feature[RING_F_RSS].limit = count; return 0; @@ -421,3 +481,53 @@ void wx_set_msglevel(struct net_device *netdev, u32 data) wx->msg_enable = data; } EXPORT_SYMBOL(wx_set_msglevel); + +int wx_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info) +{ + struct wx *wx = netdev_priv(dev); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (wx->ptp_clock) + info->phc_index = ptp_clock_index(wx->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + return 0; +} +EXPORT_SYMBOL(wx_get_ts_info); + +void wx_get_ptp_stats(struct net_device *dev, + struct ethtool_ts_stats *ts_stats) +{ + struct wx *wx = netdev_priv(dev); + + if (wx->ptp_clock) { + ts_stats->pkts = wx->tx_hwtstamp_pkts; + ts_stats->lost = wx->tx_hwtstamp_timeouts + + wx->tx_hwtstamp_skipped + + wx->rx_hwtstamp_cleared; + ts_stats->err = wx->tx_hwtstamp_errors; + } +} +EXPORT_SYMBOL(wx_get_ptp_stats); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index 600c3b597d1a..9e002e699eca 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -40,4 +40,8 @@ int wx_set_channels(struct net_device *dev, struct ethtool_channels *ch); u32 wx_get_msglevel(struct net_device *netdev); void wx_set_msglevel(struct net_device *netdev, u32 data); +int wx_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info); +void wx_get_ptp_stats(struct net_device *dev, + struct ethtool_ts_stats *ts_stats); #endif /* _WX_ETHTOOL_H_ */ diff --git 
a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 945c13d1a982..0f4be72116b8 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -10,6 +10,7 @@ #include "wx_type.h" #include "wx_lib.h" +#include "wx_sriov.h" #include "wx_hw.h" static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum) @@ -112,7 +113,7 @@ static void wx_intr_disable(struct wx *wx, u64 qmask) if (mask) wr32(wx, WX_PX_IMS(0), mask); - if (wx->mac.type == wx_mac_sp) { + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { mask = (qmask >> 32); if (mask) wr32(wx, WX_PX_IMS(1), mask); @@ -126,7 +127,8 @@ void wx_intr_enable(struct wx *wx, u64 qmask) mask = (qmask & U32_MAX); if (mask) wr32(wx, WX_PX_IMC(0), mask); - if (wx->mac.type == wx_mac_sp) { + + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { mask = (qmask >> 32); if (mask) wr32(wx, WX_PX_IMC(1), mask); @@ -278,22 +280,8 @@ static int wx_acquire_sw_sync(struct wx *wx, u32 mask) return ret; } -/** - * wx_host_interface_command - Issue command to manageability block - * @wx: pointer to the HW structure - * @buffer: contains the command to write and where the return status will - * be placed - * @length: length of buffer, must be multiple of 4 bytes - * @timeout: time in ms to wait for command completion - * @return_data: read and return data from the buffer (true) or not (false) - * Needed because FW structures are big endian and decoding of - * these fields can be 8 bit or 16 bit based on command. Decoding - * is not easily understood without making a table of commands. - * So we will leave this up to the caller to read back the data - * in these cases. - **/ -int wx_host_interface_command(struct wx *wx, u32 *buffer, - u32 length, u32 timeout, bool return_data) +static int wx_host_interface_command_s(struct wx *wx, u32 *buffer, + u32 length, u32 timeout, bool return_data) { u32 hdr_size = sizeof(struct wx_hic_hdr); u32 hicr, i, bi, buf[64] = {}; @@ -301,22 +289,10 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer, u32 dword_len; u16 buf_len; - if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) { - wx_err(wx, "Buffer length failure buffersize=%d.\n", length); - return -EINVAL; - } - status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); if (status != 0) return status; - /* Calculate length in DWORDs. 
We must be DWORD aligned */ - if ((length % (sizeof(u32))) != 0) { - wx_err(wx, "Buffer length failure, not aligned to dword"); - status = -EINVAL; - goto rel_out; - } - dword_len = length >> 2; /* The device driver writes the relevant command block @@ -334,27 +310,25 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer, status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000, timeout * 1000, false, wx, WX_MNG_MBOX_CTL); + buf[0] = rd32(wx, WX_MNG_MBOX); + if ((buf[0] & 0xff0000) >> 16 == 0x80) { + wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff); + status = -EINVAL; + goto rel_out; + } + /* Check command completion */ if (status) { - wx_dbg(wx, "Command has failed with no status valid.\n"); - - buf[0] = rd32(wx, WX_MNG_MBOX); - if ((buffer[0] & 0xff) != (~buf[0] >> 24)) { - status = -EINVAL; - goto rel_out; - } - if ((buf[0] & 0xff0000) >> 16 == 0x80) { - wx_dbg(wx, "It's unknown cmd.\n"); - status = -EINVAL; - goto rel_out; - } - + wx_err(wx, "Command has failed with no status valid.\n"); wx_dbg(wx, "write value:\n"); for (i = 0; i < dword_len; i++) wx_dbg(wx, "%x ", buffer[i]); wx_dbg(wx, "read value:\n"); for (i = 0; i < dword_len; i++) wx_dbg(wx, "%x ", buf[i]); + wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24); + + goto rel_out; } if (!return_data) @@ -393,8 +367,166 @@ rel_out: wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); return status; } + +static bool wx_poll_fw_reply(struct wx *wx, u32 *buffer, u8 send_cmd) +{ + u32 dword_len = sizeof(struct wx_hic_hdr) >> 2; + struct wx_hic_hdr *recv_hdr; + u32 i; + + /* read hdr */ + for (i = 0; i < dword_len; i++) { + buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i); + le32_to_cpus(&buffer[i]); + } + + /* check hdr */ + recv_hdr = (struct wx_hic_hdr *)buffer; + if (recv_hdr->cmd == send_cmd && + recv_hdr->index == wx->swfw_index) + return true; + + return false; +} + +static int wx_host_interface_command_r(struct wx *wx, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + struct wx_hic_hdr *hdr = (struct wx_hic_hdr *)buffer; + u32 hdr_size = sizeof(struct wx_hic_hdr); + bool busy, reply; + u32 dword_len; + u16 buf_len; + int err = 0; + u8 send_cmd; + u32 i; + + /* wait to get lock */ + might_sleep(); + err = read_poll_timeout(test_and_set_bit, busy, !busy, 1000, timeout * 1000, + false, WX_STATE_SWFW_BUSY, wx->state); + if (err) + return err; + + /* index to unique seq id for each mbox message */ + hdr->index = wx->swfw_index; + send_cmd = hdr->cmd; + + dword_len = length >> 2; + /* write data to SW-FW mbox array */ + for (i = 0; i < dword_len; i++) { + wr32a(wx, WX_SW2FW_MBOX, i, (__force u32)cpu_to_le32(buffer[i])); + /* write flush */ + rd32a(wx, WX_SW2FW_MBOX, i); + } + + /* generate interrupt to notify FW */ + wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, 0); + wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD); + + /* polling reply from FW */ + err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 2000, + timeout * 1000, true, wx, buffer, send_cmd); + if (err) { + wx_err(wx, "Polling from FW messages timeout, cmd: 0x%x, index: %d\n", + send_cmd, wx->swfw_index); + goto rel_out; + } + + if (hdr->cmd_or_resp.ret_status == 0x80) { + wx_err(wx, "Unknown FW command: 0x%x\n", send_cmd); + err = -EINVAL; + goto rel_out; + } + + /* expect no reply from FW then return */ + if (!return_data) + goto rel_out; + + /* If there is any thing in data position pull it in */ + buf_len = hdr->buf_len; + if (buf_len == 0) + goto rel_out; + + if (length < buf_len + 
hdr_size) { + wx_err(wx, "Buffer not large enough for reply message.\n"); + err = -EFAULT; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + for (i = hdr_size >> 2; i <= dword_len; i++) { + buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i); + le32_to_cpus(&buffer[i]); + } + +rel_out: + /* index++, index replace wx_hic_hdr.checksum */ + if (wx->swfw_index == WX_HIC_HDR_INDEX_MAX) + wx->swfw_index = 0; + else + wx->swfw_index++; + + clear_bit(WX_STATE_SWFW_BUSY, wx->state); + return err; +} + +/** + * wx_host_interface_command - Issue command to manageability block + * @wx: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + **/ +int wx_host_interface_command(struct wx *wx, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) { + wx_err(wx, "Buffer length failure buffersize=%d.\n", length); + return -EINVAL; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + wx_err(wx, "Buffer length failure, not aligned to dword"); + return -EINVAL; + } + + if (test_bit(WX_FLAG_SWFW_RING, wx->flags)) + return wx_host_interface_command_r(wx, buffer, length, + timeout, return_data); + + return wx_host_interface_command_s(wx, buffer, length, timeout, return_data); +} EXPORT_SYMBOL(wx_host_interface_command); +int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles) +{ + struct wx_hic_set_pps pps_cmd; + + pps_cmd.hdr.cmd = FW_PPS_SET_CMD; + pps_cmd.hdr.buf_len = FW_PPS_SET_LEN; + pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + pps_cmd.lan_id = wx->bus.func; + pps_cmd.enable = (u8)enable; + pps_cmd.nsec = nsec; + pps_cmd.cycles = cycles; + pps_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + + return wx_host_interface_command(wx, (u32 *)&pps_cmd, + sizeof(pps_cmd), + WX_HI_COMMAND_TIMEOUT, + false); +} + /** * wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd * assuming that the semaphore is already obtained. @@ -425,7 +557,10 @@ static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data) if (status != 0) return status; - *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); + if (!test_bit(WX_FLAG_SWFW_RING, wx->flags)) + *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); + else + *data = (u16)rd32a(wx, WX_FW2SW_MBOX, FW_NVM_DATA_OFFSET); return status; } @@ -469,6 +604,7 @@ int wx_read_ee_hostif_buffer(struct wx *wx, u16 words_to_read; u32 value = 0; int status; + u32 mbox; u32 i; /* Take semaphore for the entire operation. 
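
wx_set_pps() above is the template for issuing firmware commands through the new split paths: populate a wx_hic_hdr, then let wx_host_interface_command() route the buffer to the SW-FW ring path (command_r) or the legacy mailbox path (command_s) depending on WX_FLAG_SWFW_RING. A minimal sketch of a hypothetical caller following the same recipe; FW_EXAMPLE_CMD, FW_EXAMPLE_LEN and struct wx_hic_example are invented for illustration and do not exist in the driver:

```c
/* Hypothetical command wrapper; mirrors the wx_set_pps() pattern above.
 * FW_EXAMPLE_CMD, FW_EXAMPLE_LEN and struct wx_hic_example are made up.
 */
struct wx_hic_example {
	struct wx_hic_hdr hdr;
	u32 data;
};

static int wx_send_example_cmd(struct wx *wx, u32 data)
{
	struct wx_hic_example cmd = {};

	cmd.hdr.cmd = FW_EXAMPLE_CMD;
	cmd.hdr.buf_len = FW_EXAMPLE_LEN;
	cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	/* on the SW-FW ring path the checksum slot is reused as a seq index */
	cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
	cmd.data = data;

	/* length must be non-zero, DWORD aligned and within
	 * WX_HI_MAX_BLOCK_BYTE_LENGTH, or the wrapper rejects it
	 */
	return wx_host_interface_command(wx, (u32 *)&cmd, sizeof(cmd),
					 WX_HI_COMMAND_TIMEOUT, false);
}
```
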
*/ @@ -501,8 +637,12 @@ int wx_read_ee_hostif_buffer(struct wx *wx, goto out; } + if (!test_bit(WX_FLAG_SWFW_RING, wx->flags)) + mbox = WX_MNG_MBOX; + else + mbox = WX_FW2SW_MBOX; for (i = 0; i < words_to_read; i++) { - u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i; + u32 reg = mbox + (FW_NVM_DATA_OFFSET << 2) + 2 * i; value = rd32(wx, reg); data[current_word] = (u16)(value & 0xffff); @@ -552,12 +692,18 @@ void wx_init_eeprom_params(struct wx *wx) } } - if (wx->mac.type == wx_mac_sp) { + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: + case wx_mac_aml40: if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) { wx_err(wx, "NVM Read Error\n"); return; } data = data >> 1; + break; + default: + break; } eeprom->sw_region_offset = data; @@ -618,7 +764,8 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, /* setup VMDq pool mapping */ wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); - if (wx->mac.type == wx_mac_sp) + + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32); /* HW expects these in little endian so we reverse the byte @@ -757,7 +904,7 @@ void wx_init_rx_addrs(struct wx *wx) wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); - if (wx->mac.type == wx_mac_sp) { + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { /* clear VMDq pool/queue selection for RAR 0 */ wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL); } @@ -804,11 +951,28 @@ static void wx_sync_mac_table(struct wx *wx) } } +static void wx_full_sync_mac_table(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { + wx_set_rar(wx, i, + wx->mac_table[i].addr, + wx->mac_table[i].pools, + WX_PSR_MAC_SWC_AD_H_AV); + } else { + wx_clear_rar(wx, i); + } + wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED); + } +} + /* this function destroys the first RAR entry */ void wx_mac_set_default_filter(struct wx *wx, u8 *addr) { memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN); - wx->mac_table[0].pools = 1ULL; + wx->mac_table[0].pools = BIT(VMDQ_P(0)); wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE); wx_set_rar(wx, 0, wx->mac_table[0].addr, wx->mac_table[0].pools, @@ -833,7 +997,7 @@ void wx_flush_sw_mac_table(struct wx *wx) } EXPORT_SYMBOL(wx_flush_sw_mac_table); -static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) +int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) { u32 i; @@ -864,7 +1028,7 @@ static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) return -ENOMEM; } -static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) +int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) { u32 i; @@ -1046,6 +1210,35 @@ static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev) wx_dbg(wx, "Update mc addr list Complete\n"); } +static void wx_restore_vf_multicasts(struct wx *wx) +{ + u32 i, j, vector_bit, vector_reg; + struct vf_data_storage *vfinfo; + + for (i = 0; i < wx->num_vfs; i++) { + u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(i)); + + vfinfo = &wx->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + wx->addr_ctrl.mta_in_use++; + vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[j]); + vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[j]); + wr32m(wx, WX_PSR_MC_TBL(vector_reg), + BIT(vector_bit), BIT(vector_bit)); + /* errata 5: maintain a copy of the reg table conf */ + wx->mac.mta_shadow[vector_reg] |= BIT(vector_bit); + } + if (vfinfo->num_vf_mc_hashes) + vmolr |= WX_PSR_VM_L2CTL_ROMPE; + else + vmolr &= 
~WX_PSR_VM_L2CTL_ROMPE; + wr32(wx, WX_PSR_VM_L2CTL(i), vmolr); + } + + /* Restore any VF macvlans */ + wx_full_sync_mac_table(wx); +} + /** * wx_write_mc_addr_list - write multicast addresses to MTA * @netdev: network interface device structure @@ -1063,6 +1256,9 @@ static int wx_write_mc_addr_list(struct net_device *netdev) wx_update_mc_addr_list(wx, netdev); + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + wx_restore_vf_multicasts(wx); + return netdev_mc_count(netdev); } @@ -1083,7 +1279,7 @@ int wx_set_mac(struct net_device *netdev, void *p) if (retval) return retval; - wx_del_mac_filter(wx, wx->mac.addr, 0); + wx_del_mac_filter(wx, wx->mac.addr, VMDQ_P(0)); eth_hw_addr_set(netdev, addr->sa_data); memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len); @@ -1147,8 +1343,15 @@ static void wx_enable_rx(struct wx *wx) static void wx_set_rxpba(struct wx *wx) { u32 rxpktsize, txpktsize, txpbthresh; + u32 pbsize = wx->mac.rx_pb_size; + + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) { + if (test_bit(WX_FLAG_FDIR_HASH, wx->flags) || + test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) + pbsize -= 64; /* Default 64KB */ + } - rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT; + rxpktsize = pbsize << WX_RDB_PB_SZ_SHIFT; wr32(wx, WX_RDB_PB_SZ(0), rxpktsize); /* Only support an equally distributed Tx packet buffer strategy. */ @@ -1178,6 +1381,10 @@ static int wx_hpbthresh(struct wx *wx) /* Calculate delay value for device */ dv_id = WX_DV(link, tc); + /* Loopback switch introduces additional latency */ + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + dv_id += WX_B2BT(tc); + /* Delay value is calculated in bit times convert to KB */ kb = WX_BT2KB(dv_id); rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT; @@ -1233,12 +1440,107 @@ static void wx_pbthresh_setup(struct wx *wx) wx->fc.low_water = 0; } +static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf) +{ + u32 pfvfspoof, reg_offset, vf_shift; + + vf_shift = WX_VF_IND_SHIFT(vf); + reg_offset = WX_VF_REG_OFFSET(vf); + + pfvfspoof = rd32(wx, WX_TDM_ETYPE_AS(reg_offset)); + if (enable) + pfvfspoof |= BIT(vf_shift); + else + pfvfspoof &= ~BIT(vf_shift); + wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof); +} + +int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf); + struct wx *wx = netdev_priv(netdev); + u32 regval; + + if (vf >= wx->num_vfs) + return -EINVAL; + + wx->vfinfo[vf].spoofchk_enabled = setting; + + regval = (setting << vf_bit); + wr32m(wx, WX_TDM_MAC_AS(index), regval | BIT(vf_bit), regval); + + if (wx->vfinfo[vf].vlan_count) + wr32m(wx, WX_TDM_VLAN_AS(index), regval | BIT(vf_bit), regval); + + return 0; +} + +static void wx_configure_virtualization(struct wx *wx) +{ + u16 pool = wx->num_rx_pools; + u32 reg_offset, vf_shift; + u32 i; + + if (!test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + return; + + wr32m(wx, WX_PSR_VM_CTL, + WX_PSR_VM_CTL_POOL_MASK | WX_PSR_VM_CTL_REPLEN, + FIELD_PREP(WX_PSR_VM_CTL_POOL_MASK, VMDQ_P(0)) | + WX_PSR_VM_CTL_REPLEN); + while (pool--) + wr32m(wx, WX_PSR_VM_L2CTL(pool), + WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE); + + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + vf_shift = BIT(VMDQ_P(0)); + /* Enable only the PF pools for Tx/Rx */ + wr32(wx, WX_RDM_VF_RE(0), vf_shift); + wr32(wx, WX_TDM_VF_TE(0), vf_shift); + } else { + vf_shift = WX_VF_IND_SHIFT(VMDQ_P(0)); + reg_offset = WX_VF_REG_OFFSET(VMDQ_P(0)); + + /* Enable only the PF pools for Tx/Rx */ + wr32(wx, WX_RDM_VF_RE(reg_offset), 
GENMASK(31, vf_shift)); + wr32(wx, WX_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1); + wr32(wx, WX_TDM_VF_TE(reg_offset), GENMASK(31, vf_shift)); + wr32(wx, WX_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1); + } + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags); + + for (i = 0; i < wx->num_vfs; i++) { + if (!wx->vfinfo[i].spoofchk_enabled) + wx_set_vf_spoofchk(wx->netdev, i, false); + /* enable ethertype anti spoofing if hw supports it */ + wx_set_ethertype_anti_spoofing(wx, true, i); + } +} + static void wx_configure_port(struct wx *wx) { u32 value, i; - value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ; + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + value = (wx->num_vfs == 0) ? + WX_CFG_PORT_CTL_NUM_VT_NONE : + WX_CFG_PORT_CTL_NUM_VT_8; + } else { + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) { + if (wx->ring_feature[RING_F_RSS].indices == 4) + value = WX_CFG_PORT_CTL_NUM_VT_32; + else + value = WX_CFG_PORT_CTL_NUM_VT_64; + } else { + value = 0; + } + } + + value |= WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ; wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_NUM_VT_MASK | WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ, value); @@ -1261,7 +1563,7 @@ static void wx_configure_port(struct wx *wx) * Stops the receive data path and waits for the HW to internally empty * the Rx security block **/ -static int wx_disable_sec_rx_path(struct wx *wx) +int wx_disable_sec_rx_path(struct wx *wx) { u32 secrx; @@ -1271,6 +1573,7 @@ static int wx_disable_sec_rx_path(struct wx *wx) return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY, 1000, 40000, false, wx, WX_RSC_ST); } +EXPORT_SYMBOL(wx_disable_sec_rx_path); /** * wx_enable_sec_rx_path - Enables the receive data path @@ -1278,11 +1581,12 @@ static int wx_disable_sec_rx_path(struct wx *wx) * * Enables the receive data path. 
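
The WX_VF_REG_OFFSET()/WX_VF_IND_SHIFT() pair used by wx_set_vf_spoofchk() and wx_configure_virtualization() above addresses one bit per VF across an array of 32-bit registers. A sketch of the implied arithmetic, assuming the conventional 32-VFs-per-register split; the authoritative definitions live in wx_type.h:

```c
/* Assumed expansion of the per-VF helpers; illustration only. */
#define EX_VF_REG_OFFSET(vf)	((vf) / 32)	/* which 32-bit register */
#define EX_VF_IND_SHIFT(vf)	((vf) % 32)	/* which bit inside it */

static void wx_vf_antispoof_example(struct wx *wx, int vf, bool enable)
{
	u32 reg_offset = EX_VF_REG_OFFSET(vf);	/* e.g. vf 40 -> register 1 */
	u32 vf_shift = EX_VF_IND_SHIFT(vf);	/* e.g. vf 40 -> bit 8 */

	/* read-modify-write a single VF's bit, as
	 * wx_set_ethertype_anti_spoofing() does above
	 */
	wr32m(wx, WX_TDM_ETYPE_AS(reg_offset), BIT(vf_shift),
	      enable ? BIT(vf_shift) : 0);
}
```
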
**/ -static void wx_enable_sec_rx_path(struct wx *wx) +void wx_enable_sec_rx_path(struct wx *wx) { wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0); WX_WRITE_FLUSH(wx); } +EXPORT_SYMBOL(wx_enable_sec_rx_path); static void wx_vlan_strip_control(struct wx *wx, bool enable) { @@ -1297,6 +1601,83 @@ static void wx_vlan_strip_control(struct wx *wx, bool enable) } } +static void wx_vlan_promisc_enable(struct wx *wx) +{ + u32 vlnctrl, i, vind, bits, reg_idx; + + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL); + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) { + /* we need to keep the VLAN filter on in SRIOV */ + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + } else { + vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + return; + } + /* We are already in VLAN promisc, nothing to do */ + if (test_bit(WX_FLAG_VLAN_PROMISC, wx->flags)) + return; + /* Set flag so we don't redo unnecessary work */ + set_bit(WX_FLAG_VLAN_PROMISC, wx->flags); + /* Add PF to all active pools */ + for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, i); + vind = WX_VF_IND_SHIFT(VMDQ_P(0)); + reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0)); + bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx)); + bits |= BIT(vind); + wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits); + } + /* Set all bits in the VLAN filter table array */ + for (i = 0; i < wx->mac.vft_size; i++) + wr32(wx, WX_PSR_VLAN_TBL(i), U32_MAX); +} + +static void wx_scrub_vfta(struct wx *wx) +{ + u32 i, vid, bits, vfta, vind, vlvf, reg_idx; + + for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, i); + vlvf = rd32(wx, WX_PSR_VLAN_SWC_IDX); + /* pull VLAN ID from VLVF */ + vid = vlvf & ~WX_PSR_VLAN_SWC_VIEN; + if (vlvf & WX_PSR_VLAN_SWC_VIEN) { + /* if PF is part of this then continue */ + if (test_bit(vid, wx->active_vlans)) + continue; + } + /* remove PF from the pool */ + vind = WX_VF_IND_SHIFT(VMDQ_P(0)); + reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0)); + bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx)); + bits &= ~BIT(vind); + wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits); + } + /* extract values from vft_shadow and write back to VFTA */ + for (i = 0; i < wx->mac.vft_size; i++) { + vfta = wx->mac.vft_shadow[i]; + wr32(wx, WX_PSR_VLAN_TBL(i), vfta); + } +} + +static void wx_vlan_promisc_disable(struct wx *wx) +{ + u32 vlnctrl; + + /* configure vlan filtering */ + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL); + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + /* We are not in VLAN promisc, nothing to do */ + if (!test_bit(WX_FLAG_VLAN_PROMISC, wx->flags)) + return; + /* Set flag so we don't redo unnecessary work */ + clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags); + wx_scrub_vfta(wx); +} + void wx_set_rx_mode(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); @@ -1309,7 +1690,7 @@ void wx_set_rx_mode(struct net_device *netdev) /* Check for Promiscuous and All Multicast modes */ fctrl = rd32(wx, WX_PSR_CTL); fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE); - vmolr = rd32(wx, WX_PSR_VM_L2CTL(0)); + vmolr = rd32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0))); vmolr &= ~(WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_ROPE | @@ -1330,7 +1711,10 @@ void wx_set_rx_mode(struct net_device *netdev) fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE; /* pf don't want packets routing to vf, so clear UPE */ vmolr |= WX_PSR_VM_L2CTL_MPE; - vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags) && + test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + features &= 
~NETIF_F_HW_VLAN_CTAG_FILTER; } if (netdev->flags & IFF_ALLMULTI) { @@ -1353,7 +1737,7 @@ void wx_set_rx_mode(struct net_device *netdev) * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = wx_write_uc_addr_list(netdev, 0); + count = wx_write_uc_addr_list(netdev, VMDQ_P(0)); if (count < 0) { vmolr &= ~WX_PSR_VM_L2CTL_ROPE; vmolr |= WX_PSR_VM_L2CTL_UPE; @@ -1371,7 +1755,7 @@ void wx_set_rx_mode(struct net_device *netdev) wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); wr32(wx, WX_PSR_CTL, fctrl); - wr32(wx, WX_PSR_VM_L2CTL(0), vmolr); + wr32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); if ((features & NETIF_F_HW_VLAN_CTAG_RX) && (features & NETIF_F_HW_VLAN_STAG_RX)) @@ -1379,6 +1763,10 @@ void wx_set_rx_mode(struct net_device *netdev) else wx_vlan_strip_control(wx, false); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + wx_vlan_promisc_disable(wx); + else + wx_vlan_promisc_enable(wx); } EXPORT_SYMBOL(wx_set_rx_mode); @@ -1408,7 +1796,7 @@ int wx_change_mtu(struct net_device *netdev, int new_mtu) { struct wx *wx = netdev_priv(netdev); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); wx_set_rx_buffer_len(wx); return 0; @@ -1499,6 +1887,13 @@ static void wx_configure_tx_ring(struct wx *wx, txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT; txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT; + ring->atr_count = 0; + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && + test_bit(WX_FLAG_FDIR_HASH, wx->flags)) + ring->atr_sample_rate = wx->atr_sample_rate; + else + ring->atr_sample_rate = 0; + /* reinitialize tx_buffer_info */ memset(ring->tx_buffer_info, 0, sizeof(struct wx_tx_buffer) * ring->count); @@ -1621,6 +2016,13 @@ static void wx_setup_reta(struct wx *wx) u32 random_key_size = WX_RSS_KEY_SIZE / 4; u32 i, j; + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) { + if (wx->mac.type == wx_mac_em) + rss_i = 1; + else + rss_i = rss_i < 4 ? 
4 : rss_i; + } + /* Fill out hash function seeds */ for (i = 0; i < random_key_size; i++) wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]); @@ -1638,10 +2040,42 @@ static void wx_setup_reta(struct wx *wx) wx_store_reta(wx); } +#define WX_RDB_RSS_PL_2 FIELD_PREP(GENMASK(31, 29), 1) +#define WX_RDB_RSS_PL_4 FIELD_PREP(GENMASK(31, 29), 2) +static void wx_setup_psrtype(struct wx *wx) +{ + int rss_i = wx->ring_feature[RING_F_RSS].indices; + u32 psrtype; + int pool; + + psrtype = WX_RDB_PL_CFG_L4HDR | + WX_RDB_PL_CFG_L3HDR | + WX_RDB_PL_CFG_L2HDR | + WX_RDB_PL_CFG_TUN_OUTL2HDR | + WX_RDB_PL_CFG_TUN_TUNHDR; + + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + for_each_set_bit(pool, &wx->fwd_bitmask, 8) + wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype); + } else { + if (rss_i > 3) + psrtype |= WX_RDB_RSS_PL_4; + else if (rss_i > 1) + psrtype |= WX_RDB_RSS_PL_2; + + for_each_set_bit(pool, &wx->fwd_bitmask, 32) + wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype); + } +} + static void wx_setup_mrqc(struct wx *wx) { u32 rss_field = 0; + /* VT, and RSS do not coexist at the same time */ + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) + return; + /* Disable indicating checksum in descriptor, enables RSS hash */ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD); @@ -1671,21 +2105,16 @@ static void wx_setup_mrqc(struct wx *wx) **/ void wx_configure_rx(struct wx *wx) { - u32 psrtype, i; int ret; + u32 i; wx_disable_rx(wx); - - psrtype = WX_RDB_PL_CFG_L4HDR | - WX_RDB_PL_CFG_L3HDR | - WX_RDB_PL_CFG_L2HDR | - WX_RDB_PL_CFG_TUN_TUNHDR; - wr32(wx, WX_RDB_PL_CFG(0), psrtype); + wx_setup_psrtype(wx); /* enable hw crc stripping */ wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP); - if (wx->mac.type == wx_mac_sp) { + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) { u32 psrctl; /* RSC Setup */ @@ -1728,11 +2157,14 @@ void wx_configure(struct wx *wx) { wx_set_rxpba(wx); wx_pbthresh_setup(wx); + wx_configure_virtualization(wx); wx_configure_port(wx); wx_set_rx_mode(wx->netdev); wx_restore_vlan(wx); - wx_enable_sec_rx_path(wx); + + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + wx->configure_fdir(wx); wx_configure_tx(wx); wx_configure_rx(wx); @@ -1820,10 +2252,8 @@ int wx_stop_adapter(struct wx *wx) } EXPORT_SYMBOL(wx_stop_adapter); -void wx_reset_misc(struct wx *wx) +void wx_reset_mac(struct wx *wx) { - int i; - /* receive packets that size > 2048 */ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); @@ -1835,6 +2265,14 @@ void wx_reset_misc(struct wx *wx) WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE); wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); +} +EXPORT_SYMBOL(wx_reset_mac); + +void wx_reset_misc(struct wx *wx) +{ + int i; + + wx_reset_mac(wx); wr32m(wx, WX_MIS_RST_ST, WX_MIS_RST_ST_RST_INIT, 0x1E00); @@ -1958,6 +2396,10 @@ int wx_sw_init(struct wx *wx) return -ENOMEM; } + bitmap_zero(wx->state, WX_STATE_NBITS); + bitmap_zero(wx->flags, WX_PF_FLAGS_NBITS); + wx->misc_irq_domain = false; + return 0; } EXPORT_SYMBOL(wx_sw_init); @@ -2080,7 +2522,7 @@ static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on, * * Turn on/off specified VLAN in the VLAN filter table. 
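
wx_set_vfta() (exported below) and the VLAN promisc helpers above all manipulate the same one-bit-per-VLAN filter table. A sketch of the classic VFTA addressing this implies, assuming the usual 32-VLAN-IDs-per-word layout; wx_set_vfta() in this file remains the authoritative version:

```c
/* Illustration of the assumed VFTA layout: vft_size 32-bit words,
 * one bit per VLAN ID.
 */
static void wx_vfta_bit_example(struct wx *wx, u32 vlan, bool vlan_on)
{
	u32 regindex = vlan >> 5;	/* word index: vlan / 32 */
	u32 bitindex = vlan & 0x1F;	/* bit inside the word */
	u32 vfta = rd32(wx, WX_PSR_VLAN_TBL(regindex));

	if (vlan_on)
		vfta |= BIT(bitindex);
	else
		vfta &= ~BIT(bitindex);

	wr32(wx, WX_PSR_VLAN_TBL(regindex), vfta);
	/* keep the shadow copy that wx_scrub_vfta() restores from */
	wx->mac.vft_shadow[regindex] = vfta;
}
```
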
**/ -static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on) +int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on) { u32 bitindex, vfta, targetbit; bool vfta_changed = false; @@ -2331,7 +2773,13 @@ void wx_update_stats(struct wx *wx) hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT); hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT); - for (i = 0; i < wx->mac.max_rx_queues; i++) + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) { + hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH); + hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS); + } + + for (i = wx->num_vfs * wx->num_rx_queues_per_pool; + i < wx->mac.max_rx_queues; i++) hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); } EXPORT_SYMBOL(wx_update_stats); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 9e219fa717a2..26a56cba60b9 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -18,6 +18,7 @@ void wx_control_hw(struct wx *wx, bool drv); int wx_mng_present(struct wx *wx); int wx_host_interface_command(struct wx *wx, u32 *buffer, u32 length, u32 timeout, bool return_data); +int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles); int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data); int wx_read_ee_hostif_buffer(struct wx *wx, u16 offset, u16 words, u16 *data); @@ -25,9 +26,14 @@ void wx_init_eeprom_params(struct wx *wx); void wx_get_mac_addr(struct wx *wx, u8 *mac_addr); void wx_init_rx_addrs(struct wx *wx); void wx_mac_set_default_filter(struct wx *wx, u8 *addr); +int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool); +int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool); void wx_flush_sw_mac_table(struct wx *wx); int wx_set_mac(struct net_device *netdev, void *p); void wx_disable_rx(struct wx *wx); +int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int wx_disable_sec_rx_path(struct wx *wx); +void wx_enable_sec_rx_path(struct wx *wx); void wx_set_rx_mode(struct net_device *netdev); int wx_change_mtu(struct net_device *netdev, int new_mtu); void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring); @@ -36,9 +42,11 @@ void wx_configure(struct wx *wx); void wx_start_hw(struct wx *wx); int wx_disable_pcie_master(struct wx *wx); int wx_stop_adapter(struct wx *wx); +void wx_reset_mac(struct wx *wx); void wx_reset_misc(struct wx *wx); int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); +int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 6fae161cbcb8..5c747509d56b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -5,6 +5,7 @@ #include <net/ip6_checksum.h> #include <net/page_pool/helpers.h> #include <net/inet_ecn.h> +#include <linux/workqueue.h> #include <linux/iopoll.h> #include <linux/sctp.h> #include <linux/pci.h> @@ -13,6 +14,7 @@ #include "wx_type.h" #include "wx_lib.h" +#include "wx_ptp.h" #include "wx_hw.h" /* Lookup table mapping the HW PTYPE to the bit field for decoding */ @@ -148,10 +150,11 @@ static struct wx_dec_ptype wx_ptype_lookup[256] = { [0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4), }; -static struct 
wx_dec_ptype wx_decode_ptype(const u8 ptype) +struct wx_dec_ptype wx_decode_ptype(const u8 ptype) { return wx_ptype_lookup[ptype]; } +EXPORT_SYMBOL(wx_decode_ptype); /* wx_test_staterr - tests bits in Rx descriptor status and error fields */ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, @@ -250,10 +253,7 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, rx_buffer->page_offset; /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); -#endif + net_prefetch(page_addr); /* allocate a skb to store the frags */ skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256); @@ -311,7 +311,8 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, return true; page = page_pool_dev_alloc_pages(rx_ring->page_pool); - WARN_ON(!page); + if (unlikely(!page)) + return false; dma = page_pool_get_dma_addr(page); bi->page_dma = dma; @@ -547,7 +548,8 @@ static void wx_rx_checksum(struct wx_ring *ring, return; /* Hardware can't guarantee csum if IPv6 Dest Header found */ - if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc)) + if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && + wx_test_staterr(rx_desc, WX_RXD_STAT_IPV6EX)) return; /* if L4 checksum error */ @@ -599,8 +601,17 @@ static void wx_process_skb_fields(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff *skb) { + struct wx *wx = netdev_priv(rx_ring->netdev); + wx_rx_hash(rx_ring, rx_desc, skb); wx_rx_checksum(rx_ring, rx_desc, skb); + + if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, wx->flags)) && + unlikely(wx_test_staterr(rx_desc, WX_RXD_STAT_TS))) { + wx_ptp_rx_hwtstamp(rx_ring->q_vector->wx, skb); + rx_ring->last_rx_timestamp = jiffies; + } + wx_rx_vlan(rx_ring, rx_desc, skb); skb_record_rx_queue(skb, rx_ring->queue_index); skb->protocol = eth_type_trans(skb, rx_ring->netdev); @@ -707,6 +718,7 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, { unsigned int budget = q_vector->wx->tx_work_limit; unsigned int total_bytes = 0, total_packets = 0; + struct wx *wx = netdev_priv(tx_ring->netdev); unsigned int i = tx_ring->next_to_clean; struct wx_tx_buffer *tx_buffer; union wx_tx_desc *tx_desc; @@ -739,6 +751,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; + /* schedule check for Tx timestamp */ + if (unlikely(test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state)) && + skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS) + ptp_schedule_worker(wx->ptp_clock, 0); + /* free the skb */ napi_consume_skb(tx_buffer->skb, napi_budget); @@ -934,9 +951,9 @@ static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc, tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } -static void wx_tx_map(struct wx_ring *tx_ring, - struct wx_tx_buffer *first, - const u8 hdr_len) +static int wx_tx_map(struct wx_ring *tx_ring, + struct wx_tx_buffer *first, + const u8 hdr_len) { struct sk_buff *skb = first->skb; struct wx_tx_buffer *tx_buffer; @@ -1015,6 +1032,8 @@ static void wx_tx_map(struct wx_ring *tx_ring, netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount); + /* set the timestamp */ + first->time_stamp = jiffies; skb_tx_timestamp(skb); /* Force memory writes to complete before letting h/w know there @@ -1040,7 +1059,7 @@ static void wx_tx_map(struct wx_ring *tx_ring, if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more()) writel(i, tx_ring->tail); - return; + return 0; dma_error: dev_err(tx_ring->dev, "TX 
DMA map failed\n"); @@ -1064,6 +1083,8 @@ dma_error: first->skb = NULL; tx_ring->next_to_use = i; + + return -ENOMEM; } static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens, @@ -1084,26 +1105,6 @@ static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens, context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } -static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr) -{ - struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); - - *nexthdr = hdr->nexthdr; - offset += sizeof(struct ipv6hdr); - while (ipv6_ext_hdr(*nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - if (*nexthdr == NEXTHDR_NONE) - return; - hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); - if (!hp) - return; - if (*nexthdr == NEXTHDR_FRAGMENT) - break; - *nexthdr = hp->nexthdr; - } -} - union network_header { struct iphdr *ipv4; struct ipv6hdr *ipv6; @@ -1114,6 +1115,8 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) { u8 tun_prot = 0, l4_prot = 0, ptype = 0; struct sk_buff *skb = first->skb; + unsigned char *exthdr, *l4_hdr; + __be16 frag_off; if (skb->encapsulation) { union network_header hdr; @@ -1124,14 +1127,18 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) ptype = WX_PTYPE_TUN_IPV4; break; case htons(ETH_P_IPV6): - wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot); + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off); ptype = WX_PTYPE_TUN_IPV6; break; default: return ptype; } - if (tun_prot == IPPROTO_IPIP) { + if (tun_prot == IPPROTO_IPIP || tun_prot == IPPROTO_IPV6) { hdr.raw = (void *)inner_ip_hdr(skb); ptype |= WX_PTYPE_PKT_IPIP; } else if (tun_prot == IPPROTO_UDP) { @@ -1168,7 +1175,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) l4_prot = hdr.ipv4->protocol; break; case 6: - wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot); + l4_hdr = skb_inner_transport_header(skb); + exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = inner_ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off); ptype |= WX_PTYPE_PKT_IPV6; break; default: @@ -1181,7 +1192,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) ptype = WX_PTYPE_PKT_IP; break; case htons(ETH_P_IPV6): - wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot); + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off); ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6; break; default: @@ -1271,13 +1286,20 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first, /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ if (enc) { + unsigned char *exthdr, *l4_hdr; + __be16 frag_off; + switch (first->protocol) { case htons(ETH_P_IP): tun_prot = ip_hdr(skb)->protocol; first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4; break; case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off); break; default: break; @@ -1300,6 +1322,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer 
*first, WX_TXD_TUNNEL_LEN_SHIFT); break; case IPPROTO_IPIP: + case IPPROTO_IPV6: tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - (char *)ip_hdr(skb)) >> 2) << WX_TXD_OUTER_IPLEN_SHIFT; @@ -1337,12 +1360,15 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, u8 tun_prot = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) && !(first->tx_flags & WX_TX_FLAGS_CC)) return; vlan_macip_lens = skb_network_offset(skb) << WX_TXD_MACLEN_SHIFT; } else { + unsigned char *exthdr, *l4_hdr; + __be16 frag_off; u8 l4_prot = 0; union { struct iphdr *ipv4; @@ -1364,7 +1390,12 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, tun_prot = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); break; default: return; @@ -1388,6 +1419,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, WX_TXD_TUNNEL_LEN_SHIFT); break; case IPPROTO_IPIP: + case IPPROTO_IPV6: tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - (char *)ip_hdr(skb)) >> 2) << WX_TXD_OUTER_IPLEN_SHIFT; @@ -1410,7 +1442,10 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, break; case 6: vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; + exthdr = network_hdr.raw + sizeof(struct ipv6hdr); l4_prot = network_hdr.ipv6->nexthdr; + if (transport_hdr.raw != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off); break; default: break; @@ -1430,7 +1465,8 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, WX_TXD_L4LEN_SHIFT; break; default: - break; + skb_checksum_help(skb); + goto csum_failed; } /* update TX checksum flag */ @@ -1453,6 +1489,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, struct wx_ring *tx_ring) { + struct wx *wx = netdev_priv(tx_ring->netdev); u16 count = TXD_USE_COUNT(skb_headlen(skb)); struct wx_tx_buffer *first; u8 hdr_len = 0, ptype; @@ -1487,6 +1524,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, tx_flags |= WX_TX_FLAGS_HW_VLAN; } + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + wx->ptp_clock) { + if (wx->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(WX_STATE_PTP_TX_IN_PROGRESS, + wx->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= WX_TX_FLAGS_TSTAMP; + wx->ptp_tx_skb = skb_get(skb); + wx->ptp_tx_start = jiffies; + } else { + wx->tx_hwtstamp_skipped++; + } + } + /* record initial flags and protocol */ first->tx_flags = tx_flags; first->protocol = vlan_get_protocol(skb); @@ -1498,12 +1549,24 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, goto out_drop; else if (!tso) wx_tx_csum(tx_ring, first, ptype); - wx_tx_map(tx_ring, first, hdr_len); + + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate) + wx->atr(tx_ring, first, ptype); + + if (wx_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; return NETDEV_TX_OK; out_drop: dev_kfree_skb_any(first->skb); first->skb = NULL; +cleanup_tx_tstamp: + if (unlikely(tx_flags & WX_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(wx->ptp_tx_skb); + wx->ptp_tx_skb = NULL; + wx->tx_hwtstamp_errors++; + clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, 
wx->state); + } return NETDEV_TX_OK; } @@ -1558,6 +1621,65 @@ void wx_napi_disable_all(struct wx *wx) } EXPORT_SYMBOL(wx_napi_disable_all); +static bool wx_set_vmdq_queues(struct wx *wx) +{ + u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit; + u16 rss_i = wx->ring_feature[RING_F_RSS].limit; + u16 rss_m = WX_RSS_DISABLED_MASK; + u16 vmdq_m = 0; + + /* only proceed if VMDq is enabled */ + if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) + return false; + /* Add starting offset to total pool count */ + vmdq_i += wx->ring_feature[RING_F_VMDQ].offset; + + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, 64, vmdq_i); + + /* 64 pool mode with 2 queues per pool, or + * 16/32/64 pool mode with 1 queue per pool + */ + if (vmdq_i > 32 || rss_i < 4) { + vmdq_m = WX_VMDQ_2Q_MASK; + rss_m = WX_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with 4 queues per pool */ + } else { + vmdq_m = WX_VMDQ_4Q_MASK; + rss_m = WX_RSS_4Q_MASK; + rss_i = 4; + } + } else { + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, 8, vmdq_i); + + /* when VMDQ on, disable RSS */ + rss_i = 1; + } + + /* remove the starting offset from the pool count */ + vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + wx->ring_feature[RING_F_VMDQ].indices = vmdq_i; + wx->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + wx->ring_feature[RING_F_RSS].indices = rss_i; + wx->ring_feature[RING_F_RSS].mask = rss_m; + + wx->queues_per_pool = rss_i;/*maybe same to num_rx_queues_per_pool*/ + wx->num_rx_pools = vmdq_i; + wx->num_rx_queues_per_pool = rss_i; + + wx->num_rx_queues = vmdq_i * rss_i; + wx->num_tx_queues = vmdq_i * rss_i; + + return true; +} + /** * wx_set_rss_queues: Allocate queues for RSS * @wx: board private structure to initialize @@ -1572,10 +1694,33 @@ static void wx_set_rss_queues(struct wx *wx) /* set mask for 16 queue limit of RSS */ f = &wx->ring_feature[RING_F_RSS]; + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) + f->mask = WX_RSS_64Q_MASK; + else + f->mask = WX_RSS_8Q_MASK; f->indices = f->limit; - wx->num_rx_queues = f->limit; - wx->num_tx_queues = f->limit; + if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) + goto out; + + clear_bit(WX_FLAG_FDIR_HASH, wx->flags); + + /* Use Flow Director in addition to RSS to ensure the best + * distribution of flows across cores, even when an FDIR flow + * isn't matched. 
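
The pool/queue arithmetic in wx_set_vmdq_queues() above reduces to two modes on the 64-function parts. A worked sketch of the resulting values, masks omitted, behaviour inferred directly from the branches above:

```c
/* Worked reduction of wx_set_vmdq_queues() for a MULTI_64_FUNC device. */
static void wx_vmdq_split_example(u16 vmdq_i, u16 rss_i,
				  u16 *pools, u16 *queues_per_pool)
{
	vmdq_i = min_t(u16, 64, vmdq_i);

	if (vmdq_i > 32 || rss_i < 4)
		*queues_per_pool = min_t(u16, rss_i, 2);	/* 64-pool, 2Q mode */
	else
		*queues_per_pool = 4;				/* 32-pool, 4Q mode */

	*pools = vmdq_i;
	/* num_rx_queues = num_tx_queues = pools * queues_per_pool, e.g.
	 * 64 pools x 2 queues = 128, or 16 pools x 4 queues = 64
	 */
}
```
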
+ */ + if (f->indices > 1) { + f = &wx->ring_feature[RING_F_FDIR]; + + f->indices = f->limit; + + if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))) + set_bit(WX_FLAG_FDIR_HASH, wx->flags); + } + +out: + wx->num_rx_queues = f->indices; + wx->num_tx_queues = f->indices; } static void wx_set_num_queues(struct wx *wx) @@ -1585,6 +1730,9 @@ wx->num_tx_queues = 1; wx->queues_per_pool = 1; + if (wx_set_vmdq_queues(wx)) + return; + wx_set_rss_queues(wx); } @@ -1665,6 +1813,10 @@ static int wx_set_interrupt_capability(struct wx *wx) if (ret == 0 || (ret == -ENOMEM)) return ret; + /* Disable VMDq support */ + dev_warn(&wx->pdev->dev, "Disabling VMDq support\n"); + clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags); + /* Disable RSS */ dev_warn(&wx->pdev->dev, "Disabling RSS support\n"); wx->ring_feature[RING_F_RSS].limit = 1; @@ -1674,22 +1826,66 @@ /* minimum one for queue, one for misc */ nvecs = 1; nvecs = pci_alloc_irq_vectors(pdev, nvecs, - nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY); + nvecs, PCI_IRQ_MSI | PCI_IRQ_INTX); if (nvecs == 1) { if (pdev->msi_enabled) wx_err(wx, "Fallback to MSI.\n"); else - wx_err(wx, "Fallback to LEGACY.\n"); + wx_err(wx, "Fallback to INTx.\n"); } else { - wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs); + wx_err(wx, "Failed to allocate MSI/INTx interrupts. Error: %d\n", nvecs); return nvecs; } pdev->irq = pci_irq_vector(pdev, 0); + wx->num_q_vectors = 1; return 0; } +static bool wx_cache_ring_vmdq(struct wx *wx) +{ + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; + struct wx_ring_feature *rss = &wx->ring_feature[RING_F_RSS]; + u16 reg_idx; + int i; + + /* only proceed if VMDq is enabled */ + if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) + return false; + + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + wx->rx_ring[i]->reg_idx = reg_idx; + } + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + wx->tx_ring[i]->reg_idx = reg_idx; + } + } else { + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset; + for (i = 0; i < wx->num_rx_queues; i++) + /* If we are greater than indices move to next pool */ + wx->rx_ring[i]->reg_idx = reg_idx + i; + + reg_idx = vmdq->offset; + for (i = 0; i < wx->num_tx_queues; i++) + /* If we are greater than indices move to next pool */ + wx->tx_ring[i]->reg_idx = reg_idx + i; + } + + return true; +} + /** * wx_cache_ring_rss - Descriptor ring to register mapping for RSS * @wx: board private structure to initialize @@ -1701,6 +1897,9 @@ static void wx_cache_ring_rss(struct wx *wx) { u16 i; + if (wx_cache_ring_vmdq(wx)) + return; + for (i = 0; i < wx->num_rx_queues; i++) wx->rx_ring[i]->reg_idx = i; @@ -1758,10 +1957,17 @@ static int wx_alloc_q_vector(struct wx *wx, /* initialize pointer to rings */ ring = q_vector->ring; - if (wx->mac.type == wx_mac_sp) + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: + case wx_mac_aml40: default_itr =
WX_12K_ITR; - else + break; + default: default_itr = WX_7K_ITR; + break; + } + /* initialize ITR */ if (txr_count && !rxr_count) /* tx only vector */ @@ -1996,7 +2202,8 @@ void wx_free_irq(struct wx *wx) int vector; if (!(pdev->msix_enabled)) { - free_irq(pdev->irq, wx); + if (!wx->misc_irq_domain) + free_irq(pdev->irq, wx); return; } @@ -2011,7 +2218,7 @@ void wx_free_irq(struct wx *wx) free_irq(entry->vector, q_vector); } - if (wx->mac.type == wx_mac_em) + if (!wx->misc_irq_domain) free_irq(wx->msix_entry->vector, wx); } EXPORT_SYMBOL(wx_free_irq); @@ -2026,6 +2233,9 @@ int wx_setup_isb_resources(struct wx *wx) { struct pci_dev *pdev = wx->pdev; + if (wx->isb_mem) + return 0; + wx->isb_mem = dma_alloc_coherent(&pdev->dev, sizeof(u32) * 4, &wx->isb_dma, @@ -2089,7 +2299,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction, wr32(wx, WX_PX_MISC_IVAR, ivar); } else { /* tx or rx causes */ - msix_vector += 1; /* offset for queue vectors */ + if (!(wx->mac.type == wx_mac_em && wx->num_vfs == 7)) + msix_vector += 1; /* offset for queue vectors */ msix_vector |= WX_PX_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); @@ -2113,10 +2324,18 @@ void wx_write_eitr(struct wx_q_vector *q_vector) int v_idx = q_vector->v_idx; u32 itr_reg; - if (wx->mac.type == wx_mac_sp) + switch (wx->mac.type) { + case wx_mac_sp: itr_reg = q_vector->itr & WX_SP_MAX_EITR; - else + break; + case wx_mac_aml: + case wx_mac_aml40: + itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR; + break; + default: itr_reg = q_vector->itr & WX_EM_MAX_EITR; + break; + } itr_reg |= WX_PX_ITR_CNT_WDIS; @@ -2127,17 +2346,24 @@ void wx_write_eitr(struct wx_q_vector *q_vector) * wx_configure_vectors - Configure vectors for hardware * @wx: board private structure * - * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY + * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/INTx * interrupts. 
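* * As a worked example of the EITR-select setup below (illustrative values only): with wx->num_vfs == 4 on a part without WX_FLAG_MULTI_64_FUNC, the loop computes eitrsel = BIT(0) | BIT(1) | BIT(2) | BIT(3) = 0xf before it is written to WX_PX_ITRSEL.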
**/ void wx_configure_vectors(struct wx *wx) { struct pci_dev *pdev = wx->pdev; u32 eitrsel = 0; - u16 v_idx; + u16 v_idx, i; if (pdev->msix_enabled) { /* Populate MSIX to EITR Select */ + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + if (wx->num_vfs >= 32) + eitrsel = BIT(wx->num_vfs % 32) - 1; + } else { + for (i = 0; i < wx->num_vfs; i++) + eitrsel |= BIT(i); + } wr32(wx, WX_PX_ITRSEL, eitrsel); /* use EIAM to auto-mask when MSI-X interrupt is asserted * this saves a register write for every interrupt @@ -2385,7 +2611,6 @@ static void wx_free_all_tx_resources(struct wx *wx) void wx_free_resources(struct wx *wx) { - wx_free_isb_resources(wx); wx_free_all_rx_resources(wx); wx_free_all_tx_resources(wx); } @@ -2680,6 +2905,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = netdev->features ^ features; struct wx *wx = netdev_priv(netdev); + bool need_reset = false; if (features & NETIF_F_RXHASH) { wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, @@ -2690,15 +2916,120 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) wx->rss_enabled = false; } - if (changed & - (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX)) + netdev->features = features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX && wx->do_reset) + wx->do_reset(netdev); + else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) wx_set_rx_mode(netdev); - return 1; + if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) + return 0; + + /* Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(test_and_set_bit(WX_FLAG_FDIR_PERFECT, wx->flags))) + need_reset = true; + + clear_bit(WX_FLAG_FDIR_HASH, wx->flags); + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (test_and_clear_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) + need_reset = true; + + /* We cannot enable ATR if RSS is disabled */ + if (wx->ring_feature[RING_F_RSS].limit <= 1) + break; + + set_bit(WX_FLAG_FDIR_HASH, wx->flags); + break; + } + + if (need_reset && wx->do_reset) + wx->do_reset(netdev); + + return 0; } EXPORT_SYMBOL(wx_set_features); +#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_STAG_RX) + +#define NETIF_VLAN_INSERTION_FEATURES (NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_TX) + +#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_HW_VLAN_STAG_FILTER) + +netdev_features_t wx_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct wx *wx = netdev_priv(netdev); + + if (changed & NETIF_VLAN_STRIPPING_FEATURES) { + if ((features & NETIF_VLAN_STRIPPING_FEATURES) != NETIF_VLAN_STRIPPING_FEATURES && + (features & NETIF_VLAN_STRIPPING_FEATURES) != 0) { + features &= ~NETIF_VLAN_STRIPPING_FEATURES; + features |= netdev->features & NETIF_VLAN_STRIPPING_FEATURES; + wx_err(wx, "802.1Q and 802.1ad VLAN stripping must be either both on or both off."); + } + } + + if (changed & NETIF_VLAN_INSERTION_FEATURES) { + if ((features & NETIF_VLAN_INSERTION_FEATURES) != NETIF_VLAN_INSERTION_FEATURES && + (features & NETIF_VLAN_INSERTION_FEATURES) != 0) { + features &= ~NETIF_VLAN_INSERTION_FEATURES; + features |= netdev->features & NETIF_VLAN_INSERTION_FEATURES; + wx_err(wx, "802.1Q and 802.1ad VLAN insertion must be either both 
on or both off."); + } + } + + if (changed & NETIF_VLAN_FILTERING_FEATURES) { + if ((features & NETIF_VLAN_FILTERING_FEATURES) != NETIF_VLAN_FILTERING_FEATURES && + (features & NETIF_VLAN_FILTERING_FEATURES) != 0) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + features |= netdev->features & NETIF_VLAN_FILTERING_FEATURES; + wx_err(wx, "802.1Q and 802.1ad VLAN filtering must be either both on or both off."); + } + } + + return features; +} +EXPORT_SYMBOL(wx_fix_features); + +#define WX_MAX_TUNNEL_HDR_LEN 80 +netdev_features_t wx_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + struct wx *wx = netdev_priv(netdev); + + if (!skb->encapsulation) + return features; + + if (wx->mac.type == wx_mac_em) + return features & ~NETIF_F_CSUM_MASK; + + if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > + WX_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + + if (skb->inner_protocol_type == ENCAP_TYPE_ETHER && + skb->inner_protocol != htons(ETH_P_IP) && + skb->inner_protocol != htons(ETH_P_IPV6) && + skb->inner_protocol != htons(ETH_P_TEB)) + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + + return features; +} +EXPORT_SYMBOL(wx_features_check); + void wx_set_ring(struct wx *wx, u32 new_tx_count, u32 new_rx_count, struct wx_ring *temp_ring) { @@ -2765,5 +3096,35 @@ void wx_set_ring(struct wx *wx, u32 new_tx_count, } EXPORT_SYMBOL(wx_set_ring); +void wx_service_event_schedule(struct wx *wx) +{ + if (!test_and_set_bit(WX_STATE_SERVICE_SCHED, wx->state)) + queue_work(system_power_efficient_wq, &wx->service_task); +} +EXPORT_SYMBOL(wx_service_event_schedule); + +void wx_service_event_complete(struct wx *wx) +{ + if (WARN_ON(!test_bit(WX_STATE_SERVICE_SCHED, wx->state))) + return; + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(WX_STATE_SERVICE_SCHED, wx->state); +} +EXPORT_SYMBOL(wx_service_event_complete); + +void wx_service_timer(struct timer_list *t) +{ + struct wx *wx = from_timer(wx, t, service_timer); + unsigned long next_event_offset = HZ * 2; + + /* Reset the timer */ + mod_timer(&wx->service_timer, next_event_offset + jiffies); + + wx_service_event_schedule(wx); +} +EXPORT_SYMBOL(wx_service_timer); + MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers."); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index ec909e876720..aed6ea8cf0d6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -7,6 +7,7 @@ #ifndef _WX_LIB_H_ #define _WX_LIB_H_ +struct wx_dec_ptype wx_decode_ptype(const u8 ptype); void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count); u16 wx_desc_unused(struct wx_ring *ring); netdev_tx_t wx_xmit_frame(struct sk_buff *skb, @@ -30,7 +31,15 @@ int wx_setup_resources(struct wx *wx); void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats); int wx_set_features(struct net_device *netdev, netdev_features_t features); +netdev_features_t wx_fix_features(struct net_device *netdev, + netdev_features_t features); +netdev_features_t wx_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features); void wx_set_ring(struct wx *wx, u32 new_tx_count, u32 new_rx_count, struct wx_ring *temp_ring); +void wx_service_event_schedule(struct wx *wx); +void wx_service_event_complete(struct wx *wx); +void wx_service_timer(struct timer_list 
*t); -#endif /* _NGBE_LIB_H_ */ +#endif /* _WX_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c new file mode 100644 index 000000000000..73af5f11c3bd --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/pci.h> +#include "wx_type.h" +#include "wx_mbx.h" + +/** + * wx_obtain_mbx_lock_pf - obtain mailbox lock + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 on success and -EBUSY on failure + **/ +static int wx_obtain_mbx_lock_pf(struct wx *wx, u16 vf) +{ + int count = 5; + u32 mailbox; + + while (count--) { + /* Take ownership of the buffer */ + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU); + + /* reserve mailbox for vf use */ + mailbox = rd32(wx, WX_PXMAILBOX(vf)); + if (mailbox & WX_PXMAILBOX_PFU) + return 0; + else if (count) + udelay(10); + } + wx_err(wx, "Failed to obtain mailbox lock for VF%d", vf); + + return -EBUSY; +} + +static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index) +{ + u32 mbvficr = rd32(wx, WX_MBVFICR(index)); + + if (!(mbvficr & mask)) + return -EBUSY; + wr32(wx, WX_MBVFICR(index), mask); + + return 0; +} + +/** + * wx_check_for_ack_pf - checks to see if the VF has acked + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 if the VF has set the ACK bit or else -EBUSY + **/ +int wx_check_for_ack_pf(struct wx *wx, u16 vf) +{ + u32 index = vf / 16, vf_bit = vf % 16; + + return wx_check_for_bit_pf(wx, + FIELD_PREP(WX_MBVFICR_VFACK_MASK, + BIT(vf_bit)), + index); +} + +/** + * wx_check_for_msg_pf - checks to see if the VF has sent mail + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 if the VF has set the request bit or else -EBUSY + **/ +int wx_check_for_msg_pf(struct wx *wx, u16 vf) +{ + u32 index = vf / 16, vf_bit = vf % 16; + + return wx_check_for_bit_pf(wx, + FIELD_PREP(WX_MBVFICR_VFREQ_MASK, + BIT(vf_bit)), + index); +} + +/** + * wx_write_mbx_pf - Places a message in the mailbox + * @wx: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * Return: return 0 on success and -EINVAL/-EBUSY on failure + **/ +int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf) +{ + struct wx_mbx_info *mbx = &wx->mbx; + int ret, i; + + /* mbx->size is up to 15 */ + if (size > mbx->size) { + wx_err(wx, "Invalid mailbox message size %d", size); + return -EINVAL; + } + + /* lock the mailbox to prevent pf/vf race condition */ + ret = wx_obtain_mbx_lock_pf(wx, vf); + if (ret) + return ret; + + /* flush msg and acks as we are overwriting the message buffer */ + wx_check_for_msg_pf(wx, vf); + wx_check_for_ack_pf(wx, vf); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(wx, WX_PXMBMEM(vf), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer */ + /* set mirrored mailbox flags */ + wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS); + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS); + + return 0; +} + +/** + * wx_read_mbx_pf - Read a message from the mailbox + * @wx: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * Return: return 0 on success and -EBUSY on failure + **/ +int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf) +{ + struct
wx_mbx_info *mbx = &wx->mbx; + int ret; + u16 i; + + /* limit read to size of mailbox and mbx->size is up to 15 */ + if (size > mbx->size) + size = mbx->size; + + /* lock the mailbox to prevent pf/vf race condition */ + ret = wx_obtain_mbx_lock_pf(wx, vf); + if (ret) + return ret; + + for (i = 0; i < size; i++) + msg[i] = rd32a(wx, WX_PXMBMEM(vf), i); + + /* Acknowledge the message and release buffer */ + /* set mirrored mailbox flags */ + wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_ACK); + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_ACK); + + return 0; +} + +/** + * wx_check_for_rst_pf - checks to see if the VF has reset + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 on success and -EBUSY on failure + **/ +int wx_check_for_rst_pf(struct wx *wx, u16 vf) +{ + u32 reg_offset = WX_VF_REG_OFFSET(vf); + u32 vf_shift = WX_VF_IND_SHIFT(vf); + u32 vflre = 0; + + vflre = rd32(wx, WX_VFLRE(reg_offset)); + if (!(vflre & BIT(vf_shift))) + return -EBUSY; + wr32(wx, WX_VFLREC(reg_offset), BIT(vf_shift)); + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h new file mode 100644 index 000000000000..05aae138dbc3 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ +#ifndef _WX_MBX_H_ +#define _WX_MBX_H_ + +#define WX_VXMAILBOX_SIZE 15 + +/* PF Registers */ +#define WX_PXMAILBOX(i) (0x600 + (4 * (i))) /* i=[0,63] */ +#define WX_PXMAILBOX_STS BIT(0) /* Initiate message send to VF */ +#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */ +#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */ + +#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */ + +#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */ +#define WX_VFLREC(i) (0x4A8 + (4 * (i))) /* i=[0,1] */ + +/* SR-IOV specific macros */ +#define WX_MBVFICR(i) (0x480 + (4 * (i))) /* i=[0,3] */ +#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0) +#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16) + +#define WX_VT_MSGTYPE_ACK BIT(31) +#define WX_VT_MSGTYPE_NACK BIT(30) +#define WX_VT_MSGTYPE_CTS BIT(29) +#define WX_VT_MSGINFO_SHIFT 16 +#define WX_VT_MSGINFO_MASK GENMASK(23, 16) + +enum wx_pfvf_api_rev { + wx_mbox_api_null, + wx_mbox_api_13 = 4, /* API version 1.3 */ + wx_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API */ +#define WX_VF_RESET 0x01 /* VF requests reset */ +#define WX_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define WX_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define WX_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define WX_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define WX_VF_SET_MACVLAN 0x06 /* VF requests PF unicast filter */ +#define WX_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define WX_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define WX_VF_GET_RETA 0x0a /* VF request for RETA */ +#define WX_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define WX_VF_UPDATE_XCAST_MODE 0x0c +#define WX_VF_GET_LINK_STATE 0x10 /* get vf link state */ +#define WX_VF_GET_FW_VERSION 0x11 /* get fw version */ + +#define WX_VF_BACKUP 0x8001 /* VF requests backup */ + +#define WX_PF_CONTROL_MSG BIT(8) /* PF control message */ +#define WX_PF_NOFITY_VF_LINK_STATUS 0x1 +#define WX_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31) + +#define WX_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define 
WX_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define WX_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define WX_VF_DEF_QUEUE 4 /* Default queue offset */ + +#define WX_VF_PERMADDR_MSG_LEN 4 + +enum wxvf_xcast_modes { + WXVF_XCAST_MODE_NONE = 0, + WXVF_XCAST_MODE_MULTI, + WXVF_XCAST_MODE_ALLMULTI, + WXVF_XCAST_MODE_PROMISC, +}; + +int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf); +int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf); +int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id); +int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id); +int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id); + +#endif /* _WX_MBX_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c new file mode 100644 index 000000000000..2c39b879f977 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c @@ -0,0 +1,905 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ +/* Copyright (c) 1999 - 2025 Intel Corporation. */ + +#include <linux/ptp_classify.h> +#include <linux/clocksource.h> +#include <linux/pci.h> + +#include "wx_type.h" +#include "wx_ptp.h" +#include "wx_hw.h" + +#define WX_INCVAL_10GB 0xCCCCCC +#define WX_INCVAL_1GB 0x800000 +#define WX_INCVAL_100 0xA00000 +#define WX_INCVAL_10 0xC7F380 +#define WX_INCVAL_EM 0x2000000 +#define WX_INCVAL_AML 0xA00000 + +#define WX_INCVAL_SHIFT_10GB 20 +#define WX_INCVAL_SHIFT_1GB 18 +#define WX_INCVAL_SHIFT_100 15 +#define WX_INCVAL_SHIFT_10 12 +#define WX_INCVAL_SHIFT_EM 22 +#define WX_INCVAL_SHIFT_AML 21 + +#define WX_OVERFLOW_PERIOD (HZ * 30) +#define WX_PTP_TX_TIMEOUT (HZ) + +#define WX_1588_PPS_WIDTH_EM 120 + +#define WX_NS_PER_SEC 1000000000ULL + +static u64 wx_ptp_timecounter_cyc2time(struct wx *wx, u64 timestamp) +{ + unsigned int seq; + u64 ns; + + do { + seq = read_seqbegin(&wx->hw_tc_lock); + ns = timecounter_cyc2time(&wx->hw_tc, timestamp); + } while (read_seqretry(&wx->hw_tc_lock, seq)); + + return ns; +} + +static u64 wx_ptp_readtime(struct wx *wx, struct ptp_system_timestamp *sts) +{ + u32 timeh1, timeh2, timel; + + timeh1 = rd32ptp(wx, WX_TSC_1588_SYSTIMH); + ptp_read_system_prets(sts); + timel = rd32ptp(wx, WX_TSC_1588_SYSTIML); + ptp_read_system_postts(sts); + timeh2 = rd32ptp(wx, WX_TSC_1588_SYSTIMH); + + if (timeh1 != timeh2) { + ptp_read_system_prets(sts); + timel = rd32ptp(wx, WX_TSC_1588_SYSTIML); + ptp_read_system_postts(sts); + } + return (u64)timel | (u64)timeh2 << 32; +} + +static int wx_ptp_adjfine(struct ptp_clock_info *ptp, long ppb) +{ + struct wx *wx = container_of(ptp, struct wx, ptp_caps); + u64 incval, mask; + + smp_mb(); /* Force any pending update before accessing. */ + incval = READ_ONCE(wx->base_incval); + incval = adjust_by_scaled_ppm(incval, ppb); + + mask = (wx->mac.type == wx_mac_em) ?
0x7FFFFFF : 0xFFFFFF; + incval &= mask; + if (wx->mac.type != wx_mac_em) + incval |= 2 << 24; + + wr32ptp(wx, WX_TSC_1588_INC, incval); + + return 0; +} + +static int wx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct wx *wx = container_of(ptp, struct wx, ptp_caps); + unsigned long flags; + + write_seqlock_irqsave(&wx->hw_tc_lock, flags); + timecounter_adjtime(&wx->hw_tc, delta); + write_sequnlock_irqrestore(&wx->hw_tc_lock, flags); + + if (wx->ptp_setup_sdp) + wx->ptp_setup_sdp(wx); + + return 0; +} + +static int wx_ptp_gettimex64(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct wx *wx = container_of(ptp, struct wx, ptp_caps); + u64 ns, stamp; + + stamp = wx_ptp_readtime(wx, sts); + ns = wx_ptp_timecounter_cyc2time(wx, stamp); + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int wx_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct wx *wx = container_of(ptp, struct wx, ptp_caps); + unsigned long flags; + u64 ns; + + ns = timespec64_to_ns(ts); + /* reset the timecounter */ + write_seqlock_irqsave(&wx->hw_tc_lock, flags); + timecounter_init(&wx->hw_tc, &wx->hw_cc, ns); + write_sequnlock_irqrestore(&wx->hw_tc_lock, flags); + + if (wx->ptp_setup_sdp) + wx->ptp_setup_sdp(wx); + + return 0; +} + +/** + * wx_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @wx: the private board structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. + */ +static void wx_ptp_clear_tx_timestamp(struct wx *wx) +{ + rd32ptp(wx, WX_TSC_1588_STMPH); + if (wx->ptp_tx_skb) { + dev_kfree_skb_any(wx->ptp_tx_skb); + wx->ptp_tx_skb = NULL; + } + clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state); +} + +/** + * wx_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @wx: private board structure + * @hwtstamp: stack timestamp structure + * @timestamp: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. 
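+ * + * Typical use, mirroring wx_ptp_tx_hwtstamp() below (sketch only): + * + * u64 regval = (u64)rd32ptp(wx, WX_TSC_1588_STMPL); + * regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPH) << 32; + * wx_ptp_convert_to_hwtstamp(wx, &shhwtstamps, regval);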
+ **/ +static void wx_ptp_convert_to_hwtstamp(struct wx *wx, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + u64 ns; + + ns = wx_ptp_timecounter_cyc2time(wx, timestamp); + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * wx_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @wx: the private board struct + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +static void wx_ptp_tx_hwtstamp(struct wx *wx) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff *skb = wx->ptp_tx_skb; + u64 regval = 0; + + regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPL); + regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPH) << 32; + + wx_ptp_convert_to_hwtstamp(wx, &shhwtstamps, regval); + + wx->ptp_tx_skb = NULL; + clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state); + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); + wx->tx_hwtstamp_pkts++; +} + +static int wx_ptp_tx_hwtstamp_work(struct wx *wx) +{ + u32 tsynctxctl; + + /* we have to have a valid skb to poll for a timestamp */ + if (!wx->ptp_tx_skb) { + wx_ptp_clear_tx_timestamp(wx); + return 0; + } + + /* stop polling once we have a valid timestamp */ + tsynctxctl = rd32ptp(wx, WX_TSC_1588_CTL); + if (tsynctxctl & WX_TSC_1588_CTL_VALID) { + wx_ptp_tx_hwtstamp(wx); + return 0; + } + + return -1; +} + +/** + * wx_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @wx: pointer to wx struct + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute for the fastest + * overflowing hardware. We run it for all hardware since it shouldn't have a + * large impact. + */ +static void wx_ptp_overflow_check(struct wx *wx) +{ + bool timeout = time_is_before_jiffies(wx->last_overflow_check + + WX_OVERFLOW_PERIOD); + unsigned long flags; + + if (timeout) { + /* Update the timecounter */ + write_seqlock_irqsave(&wx->hw_tc_lock, flags); + timecounter_read(&wx->hw_tc); + write_sequnlock_irqrestore(&wx->hw_tc_lock, flags); + + wx->last_overflow_check = jiffies; + } +} + +/** + * wx_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @wx: pointer to wx struct + * + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to + * timestamp any future packets. 
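+ * + * Recovery is cheap: reading WX_PSR_1588_STMPH unlatches the timestamp + * registers, which is what the body below does once no ring has seen a + * timestamped packet for five seconds.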
+ */ +static void wx_ptp_rx_hang(struct wx *wx) +{ + struct wx_ring *rx_ring; + unsigned long rx_event; + u32 tsyncrxctl; + int n; + + tsyncrxctl = rd32(wx, WX_PSR_1588_CTL); + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID)) { + wx->last_rx_ptp_check = jiffies; + return; + } + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = wx->last_rx_ptp_check; + for (n = 0; n < wx->num_rx_queues; n++) { + rx_ring = wx->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(wx, WX_PSR_1588_STMPH); + wx->last_rx_ptp_check = jiffies; + + wx->rx_hwtstamp_cleared++; + dev_warn(&wx->pdev->dev, "clearing RX Timestamp hang"); + } +} + +/** + * wx_ptp_tx_hang - detect error case where Tx timestamp never finishes + * @wx: private network wx structure + */ +static void wx_ptp_tx_hang(struct wx *wx) +{ + bool timeout = time_is_before_jiffies(wx->ptp_tx_start + + WX_PTP_TX_TIMEOUT); + + if (!wx->ptp_tx_skb) + return; + + if (!test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. + */ + if (timeout) { + wx_ptp_clear_tx_timestamp(wx); + wx->tx_hwtstamp_timeouts++; + dev_warn(&wx->pdev->dev, "clearing Tx timestamp hang\n"); + } +} + +static long wx_ptp_do_aux_work(struct ptp_clock_info *ptp) +{ + struct wx *wx = container_of(ptp, struct wx, ptp_caps); + int ts_done; + + ts_done = wx_ptp_tx_hwtstamp_work(wx); + + wx_ptp_overflow_check(wx); + if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, + wx->flags))) + wx_ptp_rx_hang(wx); + wx_ptp_tx_hang(wx); + + return ts_done ? 1 : HZ; +} + +static u64 wx_ptp_trigger_calc(struct wx *wx) +{ + struct cyclecounter *cc = &wx->hw_cc; + unsigned long flags; + u64 ns = 0; + u32 rem; + + /* Read the current clock time, and save the cycle counter value */ + write_seqlock_irqsave(&wx->hw_tc_lock, flags); + ns = timecounter_read(&wx->hw_tc); + wx->pps_edge_start = wx->hw_tc.cycle_last; + write_sequnlock_irqrestore(&wx->hw_tc_lock, flags); + wx->pps_edge_end = wx->pps_edge_start; + + /* Figure out how far past the next second we are */ + div_u64_rem(ns, WX_NS_PER_SEC, &rem); + + /* Figure out how many nanoseconds to add to round the clock edge up + * to the next full second + */ + rem = (WX_NS_PER_SEC - rem); + + /* Adjust the clock edge to align with the next full second. 
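+ * Worked example (illustrative): if timecounter_read() returned + * ns = 41.25s, rem ends up as 0.75 * WX_NS_PER_SEC, and with the + * mult = 1, shift = 0 cyclecounter set up in wx_ptp_reset_cyclecounter() + * the start edge advances by exactly 750000000 cycles.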
*/ + wx->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult); + wx->pps_edge_end += div_u64(((u64)(rem + wx->pps_width) << + cc->shift), cc->mult); + + return (ns + rem); +} + +static int wx_ptp_setup_sdp(struct wx *wx) +{ + struct cyclecounter *cc = &wx->hw_cc; + u32 tsauxc; + u64 nsec; + + if (wx->pps_width >= WX_NS_PER_SEC) { + wx_err(wx, "PTP pps width cannot be longer than 1s!\n"); + return -EINVAL; + } + + /* disable the pin first */ + wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0); + WX_WRITE_FLUSH(wx); + + if (!test_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags)) { + if (wx->pps_enabled) { + wx->pps_enabled = false; + wx_set_pps(wx, false, 0, 0); + } + return 0; + } + + wx->pps_enabled = true; + nsec = wx_ptp_trigger_calc(wx); + wx_set_pps(wx, wx->pps_enabled, nsec, wx->pps_edge_start); + + tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 | + WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0; + wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start); + wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32)); + wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end); + wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32)); + wr32ptp(wx, WX_TSC_1588_SDP(0), + WX_TSC_1588_SDP_FUN_SEL_TT0 | WX_TSC_1588_SDP_OUT_LEVEL_H); + wr32ptp(wx, WX_TSC_1588_SDP(1), WX_TSC_1588_SDP_FUN_SEL_TS0); + wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc); + wr32ptp(wx, WX_TSC_1588_INT_EN, WX_TSC_1588_INT_EN_TT1); + WX_WRITE_FLUSH(wx); + + /* Adjust the clock edge to align with the next full second. */ + wx->sec_to_cc = div_u64(((u64)WX_NS_PER_SEC << cc->shift), cc->mult); + + return 0; +} + +static int wx_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct wx *wx = container_of(ptp, struct wx, ptp_caps); + + /** + * When PPS is enabled, unmask the interrupt for the ClockOut + * feature, so that the interrupt handler can send the PPS + * event when the clock SDP triggers. Clear mask when PPS is + * disabled + */ + if (rq->type != PTP_CLK_REQ_PEROUT || !wx->ptp_setup_sdp) + return -EOPNOTSUPP; + + /* Reject requests with unsupported flags */ + if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) + return -EOPNOTSUPP; + + if (rq->perout.phase.sec || rq->perout.phase.nsec) { + wx_err(wx, "Absolute start time not supported.\n"); + return -EINVAL; + } + + if (rq->perout.period.sec != 1 || rq->perout.period.nsec) { + wx_err(wx, "Only 1pps is supported.\n"); + return -EINVAL; + } + + if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) { + struct timespec64 ts_on; + + ts_on.tv_sec = rq->perout.on.sec; + ts_on.tv_nsec = rq->perout.on.nsec; + wx->pps_width = timespec64_to_ns(&ts_on); + } else { + wx->pps_width = 120000000; + } + + if (on) + set_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags); + else + clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags); + + return wx->ptp_setup_sdp(wx); +} + +void wx_ptp_check_pps_event(struct wx *wx) +{ + u32 tsauxc, int_status; + + /* this check is necessary in case the interrupt was enabled via some + * alternative means (ex. debug_fs). Better to check here than + * everywhere that calls this function. 
+ */ + if (!wx->ptp_clock) + return; + + int_status = rd32ptp(wx, WX_TSC_1588_INT_ST); + if (int_status & WX_TSC_1588_INT_ST_TT1) { + /* disable the pin first */ + wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0); + WX_WRITE_FLUSH(wx); + + wx_ptp_trigger_calc(wx); + + tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 | + WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0; + wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start); + wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32)); + wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end); + wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32)); + wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc); + WX_WRITE_FLUSH(wx); + } +} +EXPORT_SYMBOL(wx_ptp_check_pps_event); + +static long wx_ptp_create_clock(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + long err; + + /* do nothing if we already have a clock device */ + if (!IS_ERR_OR_NULL(wx->ptp_clock)) + return 0; + + snprintf(wx->ptp_caps.name, sizeof(wx->ptp_caps.name), + "%s", netdev->name); + wx->ptp_caps.owner = THIS_MODULE; + wx->ptp_caps.n_alarm = 0; + wx->ptp_caps.n_ext_ts = 0; + wx->ptp_caps.pps = 0; + wx->ptp_caps.adjfine = wx_ptp_adjfine; + wx->ptp_caps.adjtime = wx_ptp_adjtime; + wx->ptp_caps.gettimex64 = wx_ptp_gettimex64; + wx->ptp_caps.settime64 = wx_ptp_settime64; + wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work; + switch (wx->mac.type) { + case wx_mac_aml: + case wx_mac_aml40: + wx->ptp_caps.max_adj = 250000000; + wx->ptp_caps.n_per_out = 1; + wx->ptp_setup_sdp = wx_ptp_setup_sdp; + wx->ptp_caps.enable = wx_ptp_feature_enable; + break; + case wx_mac_sp: + wx->ptp_caps.max_adj = 250000000; + wx->ptp_caps.n_per_out = 0; + wx->ptp_setup_sdp = NULL; + break; + case wx_mac_em: + wx->ptp_caps.max_adj = 500000000; + wx->ptp_caps.n_per_out = 1; + wx->ptp_setup_sdp = wx_ptp_setup_sdp; + wx->ptp_caps.enable = wx_ptp_feature_enable; + break; + default: + return -EOPNOTSUPP; + } + + wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev); + if (IS_ERR(wx->ptp_clock)) { + err = PTR_ERR(wx->ptp_clock); + wx->ptp_clock = NULL; + wx_err(wx, "ptp clock register failed\n"); + return err; + } else if (wx->ptp_clock) { + dev_info(&wx->pdev->dev, "registered PHC device on %s\n", + netdev->name); + } + + /* Set the default timestamp mode to disabled here. We do this in + * create_clock instead of initialization, because we don't want to + * override the previous settings during a suspend/resume cycle. 
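+ * Userspace re-arms timestamping through SIOCSHWTSTAMP (handled by + * wx_hwtstamp_set() below), so these conservative OFF/NONE defaults + * only apply until the first explicit configuration.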
+ */ + wx->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + wx->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + return 0; +} + +static int wx_ptp_set_timestamp_mode(struct wx *wx, + struct kernel_hwtstamp_config *config) +{ + u32 tsync_tx_ctl = WX_TSC_1588_CTL_ENABLED; + u32 tsync_rx_ctl = WX_PSR_1588_CTL_ENABLED; + DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS); + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + bool is_l2 = false; + u32 regval; + + memcpy(flags, wx->flags, sizeof(wx->flags)); + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; + clear_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags); + clear_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_SYNC; + set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags); + set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_DELAY_REQ; + set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags); + set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_EVENT_V2; + is_l2 = true; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags); + set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags); + break; + default: + /* register PSR_1588_MSG must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages unless hardware supports timestamping all + * packets => return error + */ + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + /* define ethertype filter for timestamping L2 packets */ + if (is_l2) + wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588), + (WX_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + WX_PSR_ETYPE_SWC_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588), 0); + + /* enable/disable TX */ + regval = rd32ptp(wx, WX_TSC_1588_CTL); + regval &= ~WX_TSC_1588_CTL_ENABLED; + regval |= tsync_tx_ctl; + wr32ptp(wx, WX_TSC_1588_CTL, regval); + + /* enable/disable RX */ + regval = rd32(wx, WX_PSR_1588_CTL); + regval &= ~(WX_PSR_1588_CTL_ENABLED | WX_PSR_1588_CTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(wx, WX_PSR_1588_CTL, regval); + + /* define which PTP packets are time stamped */ + wr32(wx, WX_PSR_1588_MSG, tsync_rx_mtrl); + + WX_WRITE_FLUSH(wx); + + /* configure adapter flags only when HW is actually configured */ + memcpy(wx->flags, flags, sizeof(wx->flags)); + + /* clear TX/RX timestamp state, just to be sure */ + wx_ptp_clear_tx_timestamp(wx); + rd32(wx, WX_PSR_1588_STMPH); + + return 0; +} + +static u64 wx_ptp_read(const struct cyclecounter *hw_cc) +{ + struct wx *wx = container_of(hw_cc, struct wx, hw_cc); + + return wx_ptp_readtime(wx, NULL); +} + +static void wx_ptp_link_speed_adjust(struct wx *wx, u32 *shift, u32 *incval) 
+{ + switch (wx->mac.type) { + case wx_mac_aml: + case wx_mac_aml40: + *shift = WX_INCVAL_SHIFT_AML; + *incval = WX_INCVAL_AML; + return; + case wx_mac_em: + *shift = WX_INCVAL_SHIFT_EM; + *incval = WX_INCVAL_EM; + return; + default: + break; + } + + switch (wx->speed) { + case SPEED_10: + *shift = WX_INCVAL_SHIFT_10; + *incval = WX_INCVAL_10; + break; + case SPEED_100: + *shift = WX_INCVAL_SHIFT_100; + *incval = WX_INCVAL_100; + break; + case SPEED_1000: + *shift = WX_INCVAL_SHIFT_1GB; + *incval = WX_INCVAL_1GB; + break; + case SPEED_10000: + default: + *shift = WX_INCVAL_SHIFT_10GB; + *incval = WX_INCVAL_10GB; + break; + } +} + +/** + * wx_ptp_reset_cyclecounter - create the cycle counter from hw + * @wx: pointer to the wx structure + * + * This function should be called to set the proper values for the TSC_1588_INC + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TSC_1588_INC value is + * necessary, such as during initialization or when the link speed changes. + */ +void wx_ptp_reset_cyclecounter(struct wx *wx) +{ + u32 incval = 0, mask = 0; + struct cyclecounter cc; + unsigned long flags; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. + */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + + cc.read = wx_ptp_read; + wx_ptp_link_speed_adjust(wx, &cc.shift, &incval); + + /* update the base incval used to calculate frequency adjustment */ + WRITE_ONCE(wx->base_incval, incval); + + mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF; + incval &= mask; + if (wx->mac.type != wx_mac_em) + incval |= 2 << 24; + wr32ptp(wx, WX_TSC_1588_INC, incval); + + smp_mb(); /* Force the above update. 
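+ * This barrier pairs with the smp_mb() + READ_ONCE() in + * wx_ptp_adjfine(), so a concurrent frequency adjustment starts from + * the base_incval published above rather than a stale value.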
*/ + + /* need lock to prevent incorrect read while modifying cyclecounter */ + write_seqlock_irqsave(&wx->hw_tc_lock, flags); + memcpy(&wx->hw_cc, &cc, sizeof(wx->hw_cc)); + write_sequnlock_irqrestore(&wx->hw_tc_lock, flags); +} +EXPORT_SYMBOL(wx_ptp_reset_cyclecounter); + +void wx_ptp_reset(struct wx *wx) +{ + unsigned long flags; + + /* reset the hardware timestamping mode */ + wx_ptp_set_timestamp_mode(wx, &wx->tstamp_config); + wx_ptp_reset_cyclecounter(wx); + + wr32ptp(wx, WX_TSC_1588_SYSTIML, 0); + wr32ptp(wx, WX_TSC_1588_SYSTIMH, 0); + WX_WRITE_FLUSH(wx); + + write_seqlock_irqsave(&wx->hw_tc_lock, flags); + timecounter_init(&wx->hw_tc, &wx->hw_cc, + ktime_to_ns(ktime_get_real())); + write_sequnlock_irqrestore(&wx->hw_tc_lock, flags); + + wx->last_overflow_check = jiffies; + ptp_schedule_worker(wx->ptp_clock, HZ); + + /* Now that the shift has been calculated and the systime + * registers reset, (re-)enable the Clock out feature + */ + if (wx->ptp_setup_sdp) + wx->ptp_setup_sdp(wx); +} +EXPORT_SYMBOL(wx_ptp_reset); + +void wx_ptp_init(struct wx *wx) +{ + /* Initialize the seqlock_t first, since the user might call the clock + * functions any time after we've initialized the ptp clock device. + */ + seqlock_init(&wx->hw_tc_lock); + + /* obtain a ptp clock device, or re-use an existing device */ + if (wx_ptp_create_clock(wx)) + return; + + wx->tx_hwtstamp_pkts = 0; + wx->tx_hwtstamp_timeouts = 0; + wx->tx_hwtstamp_skipped = 0; + wx->tx_hwtstamp_errors = 0; + wx->rx_hwtstamp_cleared = 0; + /* reset the ptp related hardware bits */ + wx_ptp_reset(wx); + + /* enter the WX_STATE_PTP_RUNNING state */ + set_bit(WX_STATE_PTP_RUNNING, wx->state); +} +EXPORT_SYMBOL(wx_ptp_init); + +/** + * wx_ptp_suspend - stop ptp work items + * @wx: pointer to wx struct + * + * This function suspends ptp activity, and prevents more work from being + * generated, but does not destroy the clock device. + */ +void wx_ptp_suspend(struct wx *wx) +{ + /* leave the WX_STATE_PTP_RUNNING STATE */ + if (!test_and_clear_bit(WX_STATE_PTP_RUNNING, wx->state)) + return; + + clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags); + if (wx->ptp_setup_sdp) + wx->ptp_setup_sdp(wx); + + wx_ptp_clear_tx_timestamp(wx); +} +EXPORT_SYMBOL(wx_ptp_suspend); + +/** + * wx_ptp_stop - destroy the ptp_clock device + * @wx: pointer to wx struct + * + * Completely destroy the ptp_clock device, and disable all PTP related + * features. Intended to be run when the device is being closed. + */ +void wx_ptp_stop(struct wx *wx) +{ + /* first, suspend ptp activity */ + wx_ptp_suspend(wx); + + /* now destroy the ptp clock device */ + if (wx->ptp_clock) { + ptp_clock_unregister(wx->ptp_clock); + wx->ptp_clock = NULL; + dev_info(&wx->pdev->dev, "removed PHC on %s\n", wx->netdev->name); + } +} +EXPORT_SYMBOL(wx_ptp_stop); + +/** + * wx_ptp_rx_hwtstamp - utility function which checks for RX time stamp + * @wx: pointer to wx struct + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb) +{ + u64 regval = 0; + u32 tsyncrxctl; + + /* Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. 
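+ * + * Expected call site in the Rx cleanup path (hypothetical sketch; the + * real caller lives in wx_lib.c): + * + * if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, wx->flags))) + * wx_ptp_rx_hwtstamp(rx_ring->q_vector->wx, skb);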
+ */ + tsyncrxctl = rd32(wx, WX_PSR_1588_CTL); + if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID)) + return; + + regval |= (u64)rd32(wx, WX_PSR_1588_STMPL); + regval |= (u64)rd32(wx, WX_PSR_1588_STMPH) << 32; + + wx_ptp_convert_to_hwtstamp(wx, skb_hwtstamps(skb), regval); +} + +int wx_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg) +{ + struct wx *wx = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + *cfg = wx->tstamp_config; + + return 0; +} +EXPORT_SYMBOL(wx_hwtstamp_get); + +int wx_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return -EINVAL; + + err = wx_ptp_set_timestamp_mode(wx, cfg); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&wx->tstamp_config, cfg, sizeof(wx->tstamp_config)); + + return 0; +} +EXPORT_SYMBOL(wx_hwtstamp_set); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.h b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h new file mode 100644 index 000000000000..50db90a6e3ee --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _WX_PTP_H_ +#define _WX_PTP_H_ + +void wx_ptp_check_pps_event(struct wx *wx); +void wx_ptp_reset_cyclecounter(struct wx *wx); +void wx_ptp_reset(struct wx *wx); +void wx_ptp_init(struct wx *wx); +void wx_ptp_suspend(struct wx *wx); +void wx_ptp_stop(struct wx *wx); +void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb); +int wx_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg); +int wx_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack); + +#endif /* _WX_PTP_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c new file mode 100644 index 000000000000..e8656d9d733b --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c @@ -0,0 +1,909 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/etherdevice.h> +#include <linux/pci.h> + +#include "wx_type.h" +#include "wx_hw.h" +#include "wx_mbx.h" +#include "wx_sriov.h" + +static void wx_vf_configuration(struct pci_dev *pdev, int event_mask) +{ + bool enable = !!WX_VF_ENABLE_CHECK(event_mask); + struct wx *wx = pci_get_drvdata(pdev); + u32 vfn = WX_VF_NUM_GET(event_mask); + + if (enable) + eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr); +} + +static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs) +{ + struct vf_macvlans *mv_list; + int num_vf_macvlans, i; + + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&wx->vf_mvs.mvlist); + + num_vf_macvlans = wx->mac.num_rar_entries - + (WX_MAX_PF_MACVLANS + 1 + num_vfs); + if (!num_vf_macvlans) + return -EINVAL; + + mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), + GFP_KERNEL); + if (!mv_list) + return -ENOMEM; + + for (i = 0; i < num_vf_macvlans; i++) { + mv_list[i].vf = -1; + mv_list[i].free = true; + list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist); + } + wx->mv_list = mv_list; + + return 0; +} + +static void wx_sriov_clear_data(struct wx *wx) +{ + /* set num VFs to 0 to prevent access to vfinfo */ + wx->num_vfs = 0; + + /* free VF control structures */ + kfree(wx->vfinfo); + wx->vfinfo = NULL; + + /* free macvlan list */ + kfree(wx->mv_list); + wx->mv_list = NULL; + + /* set default pool back to 0 */ + wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0); + wx->ring_feature[RING_F_VMDQ].offset = 0; + + clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags); + /* Disable VMDq flag so device will be set in NM mode */ + if (wx->ring_feature[RING_F_VMDQ].limit == 1) + clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags); +} + +static int __wx_enable_sriov(struct wx *wx, u8 num_vfs) +{ + int i, ret = 0; + u32 value = 0; + + set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags); + dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs); + + /* Enable VMDq flag so device will be set in VM mode */ + set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags); + if (!wx->ring_feature[RING_F_VMDQ].limit) + wx->ring_feature[RING_F_VMDQ].limit = 1; + wx->ring_feature[RING_F_VMDQ].offset = num_vfs; + + wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), + GFP_KERNEL); + if (!wx->vfinfo) + return -ENOMEM; + + ret = wx_alloc_vf_macvlans(wx, num_vfs); + if (ret) + return ret; + + /* Initialize default switching mode VEB */ + wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN); + + for (i = 0; i < num_vfs; i++) { + /* enable spoof checking for all VFs */ + wx->vfinfo[i].spoofchk_enabled = true; + wx->vfinfo[i].link_enable = true; + /* untrust all VFs */ + wx->vfinfo[i].trusted = false; + /* set the default xcast mode */ + wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE; + } + + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + value = WX_CFG_PORT_CTL_NUM_VT_8; + } else { + if (num_vfs < 32) + value = WX_CFG_PORT_CTL_NUM_VT_32; + else + value = WX_CFG_PORT_CTL_NUM_VT_64; + } + wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_NUM_VT_MASK, + value); + + return ret; +} + +static void wx_sriov_reinit(struct wx *wx) +{ + rtnl_lock(); + wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev)); + rtnl_unlock(); +} + +void wx_disable_sriov(struct wx *wx) +{ + if (!pci_vfs_assigned(wx->pdev)) + pci_disable_sriov(wx->pdev); + else + wx_err(wx, "Unloading driver while VFs are assigned.\n"); + + /* clear flags and free allocated data */ + wx_sriov_clear_data(wx); +} +EXPORT_SYMBOL(wx_disable_sriov); + +static int wx_pci_sriov_enable(struct pci_dev *dev, + int num_vfs) +{ + struct wx *wx =
pci_get_drvdata(dev); + int err = 0, i; + + err = __wx_enable_sriov(wx, num_vfs); + if (err) + return err; + + wx->num_vfs = num_vfs; + for (i = 0; i < wx->num_vfs; i++) + wx_vf_configuration(dev, (i | WX_VF_ENABLE)); + + /* reset before enabling SRIOV to avoid mailbox issues */ + wx_sriov_reinit(wx); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + wx_err(wx, "Failed to enable PCI sriov: %d\n", err); + goto err_out; + } + + return num_vfs; +err_out: + wx_sriov_clear_data(wx); + return err; +} + +static void wx_pci_sriov_disable(struct pci_dev *dev) +{ + struct wx *wx = pci_get_drvdata(dev); + + wx_disable_sriov(wx); + wx_sriov_reinit(wx); +} + +int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct wx *wx = pci_get_drvdata(pdev); + int err; + + if (!num_vfs) { + if (!pci_vfs_assigned(pdev)) { + wx_pci_sriov_disable(pdev); + return 0; + } + + wx_err(wx, "can't free VFs because some are assigned to VMs.\n"); + return -EBUSY; + } + + err = wx_pci_sriov_enable(pdev, num_vfs); + if (err) + return err; + + return num_vfs; +} +EXPORT_SYMBOL(wx_pci_sriov_configure); + +static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr) +{ + u8 hw_addr[ETH_ALEN]; + int ret = 0; + + ether_addr_copy(hw_addr, mac_addr); + wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf); + ret = wx_add_mac_filter(wx, hw_addr, vf); + if (ret >= 0) + ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr); + else + eth_zero_addr(wx->vfinfo[vf].vf_mac_addr); + + return ret; +} + +static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe) +{ + u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf)); + + vmolr |= WX_PSR_VM_L2CTL_BAM; + if (aupe) + vmolr |= WX_PSR_VM_L2CTL_AUPE; + else + vmolr &= ~WX_PSR_VM_L2CTL_AUPE; + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr); +} + +static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf) +{ + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | + WX_TDM_VLAN_INS_VLANA_DEFAULT; + + wr32(wx, WX_TDM_VLAN_INS(vf), vmvir); +} + +static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf) +{ + if (!vid && !add) + return 0; + + return wx_set_vfta(wx, vid, vf, (bool)add); +} + +static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf) +{ + u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf); + u32 pfvfspoof; + + pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index)); + if (enable) + pfvfspoof |= BIT(vf_bit); + else + pfvfspoof &= ~BIT(vf_bit); + wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof); +} + +static void wx_write_qde(struct wx *wx, u32 vf, u32 qde) +{ + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u32 reg = 0, n = vf * q_per_pool / 32; + u32 i = vf * q_per_pool; + + reg = rd32(wx, WX_RDM_PF_QDE(n)); + for (i = (vf * q_per_pool - n * 32); + i < ((vf + 1) * q_per_pool - n * 32); + i++) { + if (qde == 1) + reg |= qde << i; + else + reg &= qde << i; + } + + wr32(wx, WX_RDM_PF_QDE(n), reg); +} + +static void wx_clear_vmvir(struct wx *wx, u32 vf) +{ + wr32(wx, WX_TDM_VLAN_INS(vf), 0); +} + +static void wx_ping_vf(struct wx *wx, int vf) +{ + u32 ping = WX_PF_CONTROL_MSG; + + if (wx->vfinfo[vf].clear_to_send) + ping |= WX_VT_MSGTYPE_CTS; + wx_write_mbx_pf(wx, &ping, 1, vf); +} + +static void wx_set_vf_rx_tx(struct wx *wx, int vf) +{ + u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf); + u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; + + reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index)); + reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index)); + + if (wx->vfinfo[vf].link_enable) { + reg_req_tx = 
reg_cur_tx | BIT(vf_bit); + reg_req_rx = reg_cur_rx | BIT(vf_bit); + /* Enable particular VF */ + if (reg_cur_tx != reg_req_tx) + wr32(wx, WX_TDM_VF_TE(index), reg_req_tx); + if (reg_cur_rx != reg_req_rx) + wr32(wx, WX_RDM_VF_RE(index), reg_req_rx); + } else { + reg_req_tx = BIT(vf_bit); + reg_req_rx = BIT(vf_bit); + /* Disable particular VF */ + if (reg_cur_tx & reg_req_tx) + wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx); + if (reg_cur_rx & reg_req_rx) + wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx); + } +} + +static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf) +{ + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; + unsigned int default_tc = 0; + + msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + + if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos) + msgbuf[WX_VF_TRANS_VLAN] = 1; + else + msgbuf[WX_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[WX_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static void wx_vf_reset_event(struct wx *wx, u16 vf) +{ + struct vf_data_storage *vfinfo = &wx->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(wx->netdev); + + /* add PF assigned VLAN */ + wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + wx_set_vmolr(wx, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + wx_clear_vmvir(wx, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + wx_set_vmvir(wx, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + wx_set_vmvir(wx, vfinfo->pf_vlan, + wx->default_up, vf); + } + + /* reset multicast table array for vf */ + wx->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + wx_set_rx_mode(wx->netdev); + + wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf); + /* reset VF api back to unknown */ + wx->vfinfo[vf].vf_api = wx_mbox_api_null; +} + +static void wx_vf_reset_msg(struct wx *wx, u16 vf) +{ + const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr; + struct net_device *dev = wx->netdev; + u32 msgbuf[5] = {0, 0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + u32 reg = 0, index, vf_bit; + int pf_max_frame; + + /* reset the filters for the device */ + wx_vf_reset_event(wx, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + wx_set_vf_mac(wx, vf, vf_mac); + + index = WX_VF_REG_OFFSET(vf); + vf_bit = WX_VF_IND_SHIFT(vf); + + /* force drop enable for all VF Rx queues */ + wx_write_qde(wx, vf, 1); + + /* set transmit and receive for vf */ + wx_set_vf_rx_tx(wx, vf); + + pf_max_frame = dev->mtu + ETH_HLEN; + + if (pf_max_frame > ETH_FRAME_LEN) + reg = BIT(vf_bit); + wr32(wx, WX_RDM_VFRE_CLR(index), reg); + + /* enable VF mailbox for further messages */ + wx->vfinfo[vf].clear_to_send = true; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = WX_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= WX_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= WX_VT_MSGTYPE_NACK; + wx_err(wx, "VF %d has no MAC address assigned", vf); + } + + msgbuf[3] = wx->mac.mc_filter_type; + wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf); +} + +static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf) +{ + const u8 *new_mac = ((u8 *)(&msgbuf[1])); + int ret; + + if (!is_valid_ether_addr(new_mac)) { + wx_err(wx, "VF %d attempted to set invalid mac\n", vf); + return -EINVAL; + } + + if (wx->vfinfo[vf].pf_set_mac && + memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) { + 
wx_err(wx, + "VF %d attempted to set a MAC but it already has one.", + vf); + return -EBUSY; + } + + ret = wx_set_vf_mac(wx, vf, new_mac); + if (ret < 0) + return ret; + + return 0; +} + +static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf) +{ + struct vf_data_storage *vfinfo = &wx->vfinfo[vf]; + u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK) + >> WX_VT_MSGINFO_SHIFT; + u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf)); + u32 vector_bit, vector_reg, mta_reg, i; + u16 *hash_list = (u16 *)&msgbuf[1]; + + /* only so many hash values supported */ + entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES); + vfinfo->num_vf_mc_hashes = entries; + + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]); + vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]); + mta_reg = wx->mac.mta_shadow[vector_reg]; + mta_reg |= BIT(vector_bit); + wx->mac.mta_shadow[vector_reg] = mta_reg; + wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg); + } + vmolr |= WX_PSR_VM_L2CTL_ROMPE; + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr); +} + +static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf) +{ + u32 index, vf_bit, vfre; + u32 max_frs, reg_val; + + /* determine VF receive enable location */ + index = WX_VF_REG_OFFSET(vf); + vf_bit = WX_VF_IND_SHIFT(vf); + + vfre = rd32(wx, WX_RDM_VF_RE(index)); + vfre |= BIT(vf_bit); + wr32(wx, WX_RDM_VF_RE(index), vfre); + + /* pull current max frame size from hardware */ + max_frs = DIV_ROUND_UP(max_frame, 1024); + reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK; + if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA)) + wr32(wx, WX_MAC_WDG_TIMEOUT, + max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA); +} + +static int wx_find_vlvf_entry(struct wx *wx, u32 vlan) +{ + int regindex; + u32 vlvf; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex); + vlvf = rd32(wx, WX_PSR_VLAN_SWC); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) + regindex = -EINVAL; + + return regindex; +} + +static int wx_set_vf_macvlan(struct wx *wx, + u16 vf, int index, unsigned char *mac_addr) +{ + struct vf_macvlans *entry; + struct list_head *pos; + int retval = 0; + + if (index <= 1) { + list_for_each(pos, &wx->vf_mvs.mvlist) { + entry = list_entry(pos, struct vf_macvlans, mvlist); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + wx_del_mac_filter(wx, entry->vf_macvlan, vf); + } + } + } + + if (!index) + return 0; + + entry = NULL; + list_for_each(pos, &wx->vf_mvs.mvlist) { + entry = list_entry(pos, struct vf_macvlans, mvlist); + if (entry->free) + break; + } + + if (!entry || !entry->free) + return -ENOSPC; + + retval = wx_add_mac_filter(wx, mac_addr, vf); + if (retval >= 0) { + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + } + + return retval; +} + +static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf) +{ + int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK); + int ret; + + if (add) + wx->vfinfo[vf].vlan_count++; + else if (wx->vfinfo[vf].vlan_count) + wx->vfinfo[vf].vlan_count--; + + /* in case of
promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && wx->netdev->flags & IFF_PROMISC) + wx_set_vf_vlan(wx, add, vid, VMDQ_P(0)); + + ret = wx_set_vf_vlan(wx, add, vid, vf); + if (!ret && wx->vfinfo[vf].spoofchk_enabled) + wx_set_vlan_anti_spoofing(wx, true, vf); + + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && wx->netdev->flags & IFF_PROMISC) { + u32 bits = 0, vlvf; + int reg_ndx; + + reg_ndx = wx_find_vlvf_entry(wx, vid); + if (reg_ndx < 0) + return -ENOSPC; + wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx); + vlvf = rd32(wx, WX_PSR_VLAN_SWC); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + if (VMDQ_P(0) < 32) { + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L); + bits &= ~BIT(VMDQ_P(0)); + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) + bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H); + } else { + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H); + bits &= ~BIT(VMDQ_P(0) % 32); + bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L); + } + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && !bits) + wx_set_vf_vlan(wx, add, vid, VMDQ_P(0)); + } + + return 0; +} + +static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf) +{ + int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> + WX_VT_MSGINFO_SHIFT; + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int err; + + if (wx->vfinfo[vf].pf_set_mac && index > 0) { + wx_err(wx, "VF %d requested a MACVLAN filter but was denied\n", vf); + return -EINVAL; + } + + /* A non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + wx_err(wx, "VF %d attempted to set invalid mac\n", vf); + return -EINVAL; + } + /* If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives.
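+		 * Spoof checking itself is controlled from the PF side, e.g.
+		 * with "ip link set <pf-ifname> vf <n> spoofchk on|off" (the
+		 * interface name and VF number are placeholders), which lands
+		 * in wx_set_vf_spoofchk().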
+ */ + if (wx->vfinfo[vf].spoofchk_enabled) + wx_set_vf_spoofchk(wx->netdev, vf, false); + } + + err = wx_set_vf_macvlan(wx, vf, index, new_mac); + if (err == -ENOSPC) + wx_err(wx, + "VF %d requested a MACVLAN filter but there is no space\n", + vf); + if (err < 0) + return err; + + return 0; +} + +static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case wx_mbox_api_13: + wx->vfinfo[vf].vf_api = api; + return 0; + default: + wx_err(wx, "VF %d requested invalid api version %u\n", vf, api); + return -EINVAL; + } +} + +static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf) +{ + msgbuf[1] = wx->vfinfo[vf].link_enable; + + return 0; +} + +static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf) +{ + unsigned long fw_version = 0ULL; + int ret = 0; + + ret = kstrtoul(wx->eeprom_id, 16, &fw_version); + if (ret) + return -EOPNOTSUPP; + msgbuf[1] = fw_version; + + return 0; +} + +static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf) +{ + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + if (wx->vfinfo[vf].xcast_mode == xcast_mode) + return 0; + + switch (xcast_mode) { + case WXVF_XCAST_MODE_NONE: + disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | + WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_VPE; + enable = 0; + break; + case WXVF_XCAST_MODE_MULTI: + disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_VPE; + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE; + break; + case WXVF_XCAST_MODE_ALLMULTI: + disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE; + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | + WX_PSR_VM_L2CTL_MPE; + break; + case WXVF_XCAST_MODE_PROMISC: + disable = 0; + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | + WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_VPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf)); + vmolr &= ~disable; + vmolr |= enable; + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr); + + wx->vfinfo[vf].xcast_mode = xcast_mode; + msgbuf[1] = xcast_mode; + + return 0; +} + +static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf) +{ + u16 mbx_size = WX_VXMAILBOX_SIZE; + u32 msgbuf[WX_VXMAILBOX_SIZE]; + int retval; + + retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf); + if (retval) { + wx_err(wx, "Error receiving message from VF\n"); + return; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK)) + return; + + if (msgbuf[0] == WX_VF_RESET) { + wx_vf_reset_msg(wx, vf); + return; + } + + /* until the vf completes a virtual function reset, it should not be + * allowed to start any configuration.
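+	 * The handshake, as implemented by the handlers below (the VF side
+	 * is a separate driver and only sketched here):
+	 *
+	 *   VF                          PF
+	 *   WX_VF_RESET           --->  wx_vf_reset_msg(): reset filters,
+	 *                               ACK with the permanent MAC
+	 *   WX_VF_API_NEGOTIATE   --->  wx_negotiate_vf_api()
+	 *   WX_VF_GET_QUEUES      --->  wx_get_vf_queues()
+	 *   any other config msg  --->  NACKed until clear_to_send is set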
+ */ + if (!wx->vfinfo[vf].clear_to_send) { + msgbuf[0] |= WX_VT_MSGTYPE_NACK; + wx_write_mbx_pf(wx, msgbuf, 1, vf); + return; + } + + switch ((msgbuf[0] & U16_MAX)) { + case WX_VF_SET_MAC_ADDR: + retval = wx_set_vf_mac_addr(wx, msgbuf, vf); + break; + case WX_VF_SET_MULTICAST: + wx_set_vf_multicasts(wx, msgbuf, vf); + retval = 0; + break; + case WX_VF_SET_VLAN: + retval = wx_set_vf_vlan_msg(wx, msgbuf, vf); + break; + case WX_VF_SET_LPE: + wx_set_vf_lpe(wx, msgbuf[1], vf); + retval = 0; + break; + case WX_VF_SET_MACVLAN: + retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf); + break; + case WX_VF_API_NEGOTIATE: + retval = wx_negotiate_vf_api(wx, msgbuf, vf); + break; + case WX_VF_GET_QUEUES: + retval = wx_get_vf_queues(wx, msgbuf, vf); + break; + case WX_VF_GET_LINK_STATE: + retval = wx_get_vf_link_state(wx, msgbuf, vf); + break; + case WX_VF_GET_FW_VERSION: + retval = wx_get_fw_version(wx, msgbuf, vf); + break; + case WX_VF_UPDATE_XCAST_MODE: + retval = wx_update_vf_xcast_mode(wx, msgbuf, vf); + break; + case WX_VF_BACKUP: + break; + default: + wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]); + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= WX_VT_MSGTYPE_NACK; + else + msgbuf[0] |= WX_VT_MSGTYPE_ACK; + + msgbuf[0] |= WX_VT_MSGTYPE_CTS; + + wx_write_mbx_pf(wx, msgbuf, mbx_size, vf); +} + +static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf) +{ + u32 msg = WX_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!wx->vfinfo[vf].clear_to_send) + wx_write_mbx_pf(wx, &msg, 1, vf); +} + +void wx_msg_task(struct wx *wx) +{ + u16 vf; + + for (vf = 0; vf < wx->num_vfs; vf++) { + /* process any reset requests */ + if (!wx_check_for_rst_pf(wx, vf)) + wx_vf_reset_event(wx, vf); + + /* process any messages pending */ + if (!wx_check_for_msg_pf(wx, vf)) + wx_rcv_msg_from_vf(wx, vf); + + /* process any acks */ + if (!wx_check_for_ack_pf(wx, vf)) + wx_rcv_ack_from_vf(wx, vf); + } +} +EXPORT_SYMBOL(wx_msg_task); + +void wx_disable_vf_rx_tx(struct wx *wx) +{ + wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX); + wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX); + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX); + wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX); + } +} +EXPORT_SYMBOL(wx_disable_vf_rx_tx); + +void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up) +{ + u32 msgbuf[2] = {0, 0}; + u16 i; + + if (!wx->num_vfs) + return; + msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG; + if (link_up) + msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up; + if (wx->notify_down) + msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING; + for (i = 0; i < wx->num_vfs; i++) { + if (wx->vfinfo[i].clear_to_send) + msgbuf[0] |= WX_VT_MSGTYPE_CTS; + wx_write_mbx_pf(wx, msgbuf, 2, i); + } +} +EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status); + +static void wx_set_vf_link_state(struct wx *wx, int vf, int state) +{ + wx->vfinfo[vf].link_state = state; + switch (state) { + case IFLA_VF_LINK_STATE_AUTO: + if (netif_running(wx->netdev)) + wx->vfinfo[vf].link_enable = true; + else + wx->vfinfo[vf].link_enable = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + wx->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + wx->vfinfo[vf].link_enable = false; + break; + } + /* restart the VF */ + wx->vfinfo[vf].clear_to_send = false; + wx_ping_vf(wx, vf); + + wx_set_vf_rx_tx(wx, vf); +} + +void wx_set_all_vfs(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_vfs; i++) + 
wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state); +} +EXPORT_SYMBOL(wx_set_all_vfs); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h new file mode 100644 index 000000000000..8a3a47bb5815 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _WX_SRIOV_H_ +#define _WX_SRIOV_H_ + +#define WX_VF_ENABLE_CHECK(_m) FIELD_GET(BIT(31), (_m)) +#define WX_VF_NUM_GET(_m) FIELD_GET(GENMASK(5, 0), (_m)) +#define WX_VF_ENABLE BIT(31) + +void wx_disable_sriov(struct wx *wx); +int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs); +void wx_msg_task(struct wx *wx); +void wx_disable_vf_rx_tx(struct wx *wx); +void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up); +void wx_set_all_vfs(struct wx *wx); + +#endif /* _WX_SRIOV_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 1fdeb464d5f4..7730c9fc3e02 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -4,6 +4,8 @@ #ifndef _WX_TYPE_H_ #define _WX_TYPE_H_ +#include <linux/ptp_clock_kernel.h> +#include <linux/timecounter.h> #include <linux/bitfield.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> @@ -18,8 +20,13 @@ /* MSI-X capability fields masks */ #define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF #define WX_PCI_LINK_STATUS 0xB2 +#define WX_MAX_PF_MACVLANS 15 +#define WX_MAX_VF_MC_ENTRIES 30 /**************** Global Registers ****************************/ +#define WX_VF_REG_OFFSET(_v) FIELD_GET(GENMASK(15, 5), (_v)) +#define WX_VF_IND_SHIFT(_v) FIELD_GET(GENMASK(4, 0), (_v)) + /* chip control Registers */ #define WX_MIS_PWR 0x10000 #define WX_MIS_RST 0x1000C @@ -74,6 +81,9 @@ #define WX_MAC_LXONOFFRXC 0x11E0C /*********************** Receive DMA registers **************************/ +#define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4)) +#define WX_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4)) +#define WX_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4)) #define WX_RDM_DRP_PKT 0x12500 #define WX_RDM_PKT_CNT 0x12504 #define WX_RDM_BYTE_CNT_LSB 0x12508 @@ -82,12 +92,17 @@ /************************* Port Registers ************************************/ /* port cfg Registers */ #define WX_CFG_PORT_CTL 0x14400 +#define WX_CFG_PORT_CTL_PFRSTD BIT(14) #define WX_CFG_PORT_CTL_DRV_LOAD BIT(3) #define WX_CFG_PORT_CTL_QINQ BIT(2) #define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/ #define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) #define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */ +#define WX_CFG_PORT_CTL_NUM_VT_NONE 0 +#define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1) +#define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2) +#define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3) /* GPIO Registers */ #define WX_GPIO_DR 0x14800 @@ -110,6 +125,11 @@ /*********************** Transmit DMA registers **************************/ /* transmit global control */ #define WX_TDM_CTL 0x18000 +#define WX_TDM_VF_TE(_i) (0x18004 + ((_i) * 4)) +#define WX_TDM_MAC_AS(_i) (0x18060 + ((_i) * 4)) +#define WX_TDM_VLAN_AS(_i) (0x18070 + ((_i) * 4)) +#define WX_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4)) + /* TDM CTL BIT */ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) @@ -157,10 +177,13 @@ #define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21) #define 
WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22) #define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23) +#define WX_RDB_FDIR_MATCH 0x19558 +#define WX_RDB_FDIR_MISS 0x1955C /******************************* PSR Registers *******************************/ /* psr control */ #define WX_PSR_CTL 0x15000 +#define WX_PSR_VM_CTL 0x151B0 /* Header split receive */ #define WX_PSR_CTL_SW_EN BIT(18) #define WX_PSR_CTL_RSC_ACK BIT(17) @@ -178,14 +201,36 @@ #define WX_PSR_VLAN_CTL 0x15088 #define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */ #define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */ +/* EType Queue Filter */ +#define WX_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) +#define WX_PSR_ETYPE_SWC_FILTER_1588 3 +#define WX_PSR_ETYPE_SWC_FILTER_EN BIT(31) +#define WX_PSR_ETYPE_SWC_1588 BIT(30) +/* 1588 */ +#define WX_PSR_1588_MSG 0x15120 +#define WX_PSR_1588_MSG_V1_SYNC FIELD_PREP(GENMASK(7, 0), 0) +#define WX_PSR_1588_MSG_V1_DELAY_REQ FIELD_PREP(GENMASK(7, 0), 1) +#define WX_PSR_1588_STMPL 0x151E8 +#define WX_PSR_1588_STMPH 0x151A4 +#define WX_PSR_1588_CTL 0x15188 +#define WX_PSR_1588_CTL_ENABLED BIT(4) +#define WX_PSR_1588_CTL_TYPE_MASK GENMASK(3, 1) +#define WX_PSR_1588_CTL_TYPE_L4_V1 FIELD_PREP(GENMASK(3, 1), 1) +#define WX_PSR_1588_CTL_TYPE_EVENT_V2 FIELD_PREP(GENMASK(3, 1), 5) +#define WX_PSR_1588_CTL_VALID BIT(0) /* mcast/ucast overflow tbl */ #define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) +#define WX_PSR_MC_TBL_REG(_i) FIELD_GET(GENMASK(11, 5), (_i)) +#define WX_PSR_MC_TBL_BIT(_i) FIELD_GET(GENMASK(4, 0), (_i)) #define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) +#define WX_PSR_VM_CTL_REPLEN BIT(30) /* replication enabled */ +#define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7) /* VM L2 control */ #define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) #define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */ #define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept unmatched vlan */ +#define WX_PSR_VM_L2CTL_VPE BIT(7) /* vlan promiscuous mode */ #define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */ #define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */ #define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */ @@ -224,10 +269,12 @@ #define WX_PSR_VLAN_SWC 0x16220 #define WX_PSR_VLAN_SWC_VM_L 0x16224 #define WX_PSR_VLAN_SWC_VM_H 0x16228 +#define WX_PSR_VLAN_SWC_VM(_i) (0x16224 + ((_i) * 4)) #define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */ /* VLAN pool filtering masks */ #define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */ #define WX_PSR_VLAN_SWC_ENTRIES 64 +#define WX_PSR_VLAN_SWC_VLANID_MASK GENMASK(11, 0) /********************************* RSEC **************************************/ /* general rsec */ @@ -238,6 +285,13 @@ #define WX_RSC_ST 0x17004 #define WX_RSC_ST_RSEC_RDY BIT(0) +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define WX_TDM_ETYPE_AS(_i) (0x18058 + ((_i) * 4)) +#define WX_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) +/* Per VF Port VLAN insertion rules */ +#define WX_TDM_VLAN_INS_VLANA_DEFAULT BIT(30) /* Always use default VLAN */ + /****************************** TDB ******************************************/ #define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) #define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ @@ -251,6 +305,32 @@ #define WX_TSC_ST_SECTX_RDY BIT(0) #define WX_TSC_BUF_AE 0x1D00C #define WX_TSC_BUF_AE_THR GENMASK(9, 0) +/* 1588 */ +#define WX_TSC_1588_CTL 0x11F00 +#define WX_TSC_1588_CTL_ENABLED BIT(4) +#define WX_TSC_1588_CTL_VALID BIT(0) +#define WX_TSC_1588_STMPL 0x11F04 +#define WX_TSC_1588_STMPH 0x11F08
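+/* SYSTIML/SYSTIMH below form the 64-bit free-running PHC counter that
+ * advances by the value programmed into INC. A minimal sketch of the
+ * cyclecounter read hook that wx_ptp.c (not shown in this diff) is
+ * expected to wrap around these registers -- the function name is
+ * illustrative and assumes the usual latched low-then-high read order:
+ *
+ *	static u64 wx_ptp_read_systim(const struct cyclecounter *cc)
+ *	{
+ *		struct wx *wx = container_of(cc, struct wx, hw_cc);
+ *		u64 ts = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+ *
+ *		return ts | (u64)rd32ptp(wx, WX_TSC_1588_SYSTIMH) << 32;
+ *	}
+ */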
+#define WX_TSC_1588_SYSTIML 0x11F0C +#define WX_TSC_1588_SYSTIMH 0x11F10 +#define WX_TSC_1588_INC 0x11F14 +#define WX_TSC_1588_INT_ST 0x11F20 +#define WX_TSC_1588_INT_ST_TT1 BIT(5) +#define WX_TSC_1588_INT_EN 0x11F24 +#define WX_TSC_1588_INT_EN_TT1 BIT(5) +#define WX_TSC_1588_AUX_CTL 0x11F28 +#define WX_TSC_1588_AUX_CTL_EN_TS0 BIT(8) +#define WX_TSC_1588_AUX_CTL_EN_TT1 BIT(2) +#define WX_TSC_1588_AUX_CTL_PLSG BIT(1) +#define WX_TSC_1588_AUX_CTL_EN_TT0 BIT(0) +#define WX_TSC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */ +#define WX_TSC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */ +#define WX_TSC_1588_SDP(i) (0x11F5C + ((i) * 4)) /* [0,3] */ +#define WX_TSC_1588_SDP_OUT_LEVEL_H FIELD_PREP(BIT(4), 0) +#define WX_TSC_1588_SDP_OUT_LEVEL_L FIELD_PREP(BIT(4), 1) +#define WX_TSC_1588_SDP_FUN_SEL_MASK GENMASK(2, 0) +#define WX_TSC_1588_SDP_FUN_SEL_TT0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 1) +#define WX_TSC_1588_SDP_FUN_SEL_TS0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 5) /************************************** MNG ********************************/ #define WX_MNG_SWFW_SYNC 0x1E008 @@ -262,6 +342,10 @@ #define WX_MNG_MBOX_CTL_FWRDY BIT(2) #define WX_MNG_BMC2OS_CNT 0x1E090 #define WX_MNG_OS2BMC_CNT 0x1E094 +#define WX_SW2FW_MBOX_CMD 0x1E0A0 +#define WX_SW2FW_MBOX_CMD_VLD BIT(31) +#define WX_SW2FW_MBOX 0x1E200 +#define WX_FW2SW_MBOX 0x1E300 /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 @@ -277,6 +361,9 @@ #define WX_MAC_WDG_TIMEOUT 0x1100C #define WX_MAC_RX_FLOW_CTRL 0x11090 #define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ + +#define WX_MAC_WDG_TIMEOUT_WTO_MASK GENMASK(3, 0) +#define WX_MAC_WDG_TIMEOUT_WTO_DELTA 2 /* MDIO Registers */ #define WX_MSCA 0x11200 #define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v) @@ -325,6 +412,7 @@ enum WX_MSCA_CMD_value { #define WX_12K_ITR 336 #define WX_20K_ITR 200 #define WX_SP_MAX_EITR 0x00000FF8U +#define WX_AML_MAX_EITR 0x00000FFFU #define WX_EM_MAX_EITR 0x00007FFCU /* transmit DMA Registers */ @@ -365,9 +453,19 @@ enum WX_MSCA_CMD_value { /* Number of 80 microseconds we wait for PCI Express master disable */ #define WX_PCI_MASTER_DISABLE_TIMEOUT 80000 +#define WX_RSS_64Q_MASK 0x3F +#define WX_RSS_8Q_MASK 0x7 +#define WX_RSS_4Q_MASK 0x3 +#define WX_RSS_2Q_MASK 0x1 +#define WX_RSS_DISABLED_MASK 0x0 + +#define WX_VMDQ_4Q_MASK 0x7C +#define WX_VMDQ_2Q_MASK 0x7E + /****************** Manageability Host Interface defines ********************/ #define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ #define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */ +#define WX_HIC_HDR_INDEX_MAX 255 #define FW_READ_SHADOW_RAM_CMD 0x31 #define FW_READ_SHADOW_RAM_LEN 0x6 @@ -380,6 +478,8 @@ enum WX_MSCA_CMD_value { #define FW_CEM_CMD_RESERVED 0X0 #define FW_CEM_MAX_RETRIES 3 #define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_PPS_SET_CMD 0xF6 +#define FW_PPS_SET_LEN 0x14 #define WX_SW_REGION_PTR 0x1C @@ -424,12 +524,12 @@ enum WX_MSCA_CMD_value { #define WX_MIN_RXD 128 #define WX_MIN_TXD 128 -/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8 -#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8 +/* Number of Transmit and Receive Descriptors must be a multiple of 128 */ +#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 128 +#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 128 #define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ -#define VMDQ_P(p) p +#define VMDQ_P(p) ((p) + wx->ring_feature[RING_F_VMDQ].offset) /* Supported Rx Buffer Sizes */ #define
WX_RXBUFFER_256 256 /* Used for skb receive header */ @@ -458,6 +558,8 @@ enum WX_MSCA_CMD_value { #define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */ #define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */ #define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/ +#define WX_RXD_STAT_IPV6EX BIT(12) /* IPv6 Dest Header */ +#define WX_RXD_STAT_TS BIT(14) /* IEEE1588 Time Stamp */ #define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */ #define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */ @@ -503,10 +605,36 @@ enum WX_MSCA_CMD_value { #define WX_PTYPE_TYP_TCP 0x04 #define WX_PTYPE_TYP_SCTP 0x05 +/* Packet type non-ip values */ +enum wx_l2_ptypes { + WX_PTYPE_L2_ABORTED = (WX_PTYPE_PKT_MAC), + WX_PTYPE_L2_MAC = (WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC), + + WX_PTYPE_L2_IPV4_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IPFRAG), + WX_PTYPE_L2_IPV4 = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IP), + WX_PTYPE_L2_IPV4_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_UDP), + WX_PTYPE_L2_IPV4_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_TCP), + WX_PTYPE_L2_IPV4_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_SCTP), + WX_PTYPE_L2_IPV6_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_IPFRAG), + WX_PTYPE_L2_IPV6 = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_IP), + WX_PTYPE_L2_IPV6_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_UDP), + WX_PTYPE_L2_IPV6_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_TCP), + WX_PTYPE_L2_IPV6_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_SCTP), + + WX_PTYPE_L2_TUN4_MAC = (WX_PTYPE_TUN_IPV4 | WX_PTYPE_PKT_IGM), + WX_PTYPE_L2_TUN6_MAC = (WX_PTYPE_TUN_IPV6 | WX_PTYPE_PKT_IGM), +}; + +#define WX_PTYPE_PKT(_pt) ((_pt) & 0x30) +#define WX_PTYPE_TYPL4(_pt) ((_pt) & 0x07) + #define WX_RXD_PKTTYPE(_rxd) \ ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) -#define WX_RXD_IPV6EX(_rxd) \ - ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) /*********************** Transmit Descriptor Config Masks ****************/ #define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */ #define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */ @@ -552,6 +680,9 @@ enum wx_tx_flags { WX_TX_FLAGS_OUTER_IPV4 = 0x100, WX_TX_FLAGS_LINKSEC = 0x200, WX_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + WX_TX_FLAGS_SW_VLAN = 0x40, }; /* VLAN info */ @@ -630,21 +761,30 @@ struct wx_hic_hdr { u8 cmd_resv; u8 ret_status; } cmd_or_resp; - u8 checksum; + union { + u8 checksum; + u8 index; + }; }; struct wx_hic_hdr2_req { u8 cmd; u8 buf_lenh; u8 buf_lenl; - u8 checksum; + union { + u8 checksum; + u8 index; + }; }; struct wx_hic_hdr2_rsp { u8 cmd; u8 buf_lenl; u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ - u8 checksum; + union { + u8 checksum; + u8 index; + }; }; union wx_hic_hdr2 { @@ -668,12 +808,25 @@ struct wx_hic_reset { u16 reset_type; }; +struct wx_hic_set_pps { + struct wx_hic_hdr hdr; + u8 lan_id; + u8 enable; + u16 pad2; + u64 nsec; + u64 cycles; +}; + /* Bus parameters */ struct wx_bus_info { u8 func; u16 device; }; +struct wx_mbx_info { + u16 size; +}; + struct wx_thermal_sensor_data { s16 temp; s16 alarm_thresh; @@ -683,14 +836,16 @@ struct wx_thermal_sensor_data { enum wx_mac_type { wx_mac_unknown = 0, wx_mac_sp, - wx_mac_em + wx_mac_em, + wx_mac_aml, + wx_mac_aml40, }; -enum sp_media_type { - sp_media_unknown = 0, - sp_media_fiber, - sp_media_copper, - sp_media_backplane +enum wx_media_type { + wx_media_unknown = 0, + wx_media_fiber, + wx_media_copper, + wx_media_backplane }; enum em_mac_type { @@ -830,6 +985,7 @@ struct 
wx_tx_context_desc { */ struct wx_tx_buffer { union wx_tx_desc *next_to_watch; + unsigned long time_stamp; struct sk_buff *skb; unsigned int bytecount; unsigned short gso_segs; @@ -891,6 +1047,7 @@ struct wx_ring { unsigned int size; /* length in bytes */ u16 count; /* amount of descriptors */ + unsigned long last_rx_timestamp; u8 queue_index; /* needed for multiqueue queue management */ u8 reg_idx; /* holds the special value that gets @@ -900,7 +1057,13 @@ struct wx_ring { */ u16 next_to_use; u16 next_to_clean; - u16 next_to_alloc; + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; struct wx_queue_stats stats; struct u64_stats_sync syncp; @@ -938,7 +1101,9 @@ struct wx_ring_feature { enum wx_ring_f_enum { RING_F_NONE = 0, + RING_F_VMDQ, RING_F_RSS, + RING_F_FDIR, RING_F_ARRAY_SIZE /* must be last in enum set */ }; @@ -980,19 +1145,78 @@ struct wx_hw_stats { u64 crcerrs; u64 rlec; u64 qmprc; + u64 fdirmatch; + u64 fdirmiss; +}; + +enum wx_state { + WX_STATE_RESETTING, + WX_STATE_SWFW_BUSY, + WX_STATE_PTP_RUNNING, + WX_STATE_PTP_TX_IN_PROGRESS, + WX_STATE_SERVICE_SCHED, + WX_STATE_NBITS /* must be last */ +}; + +struct vf_data_storage { + struct pci_dev *vfdev; + unsigned char vf_mac_addr[ETH_ALEN]; + bool spoofchk_enabled; + bool link_enable; + bool trusted; + int xcast_mode; + unsigned int vf_api; + bool clear_to_send; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + bool pf_set_mac; + + u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 vlan_count; + int link_state; +}; + +struct vf_macvlans { + struct list_head mvlist; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +enum wx_pf_flags { + WX_FLAG_MULTI_64_FUNC, + WX_FLAG_SWFW_RING, + WX_FLAG_VMDQ_ENABLED, + WX_FLAG_VLAN_PROMISC, + WX_FLAG_SRIOV_ENABLED, + WX_FLAG_FDIR_CAPABLE, + WX_FLAG_FDIR_HASH, + WX_FLAG_FDIR_PERFECT, + WX_FLAG_RSC_CAPABLE, + WX_FLAG_RX_HWTSTAMP_ENABLED, + WX_FLAG_RX_HWTSTAMP_IN_REGISTER, + WX_FLAG_PTP_PPS_ENABLED, + WX_FLAG_NEED_LINK_CONFIG, + WX_FLAG_NEED_SFP_RESET, + WX_PF_FLAGS_NBITS /* must be last */ }; struct wx { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + DECLARE_BITMAP(state, WX_STATE_NBITS); + DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS); void *priv; u8 __iomem *hw_addr; struct pci_dev *pdev; struct net_device *netdev; struct wx_bus_info bus; + struct wx_mbx_info mbx; struct wx_mac_info mac; enum em_mac_type mac_type; - enum sp_media_type media_type; + enum wx_media_type media_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; struct wx_fc_info fc; @@ -1010,8 +1234,12 @@ struct wx { char eeprom_id[32]; char *driver_name; enum wx_reset_type reset_type; + u8 swfw_index; /* PHY stuff */ + bool notify_down; + int adv_speed; + int adv_duplex; unsigned int link; int speed; int duplex; @@ -1043,6 +1271,8 @@ struct wx { struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp; struct wx_ring *rx_ring[64]; struct wx_q_vector *q_vector[64]; + int num_rx_pools; + int num_rx_queues_per_pool; unsigned int queues_per_pool; struct msix_entry *msix_q_entries; @@ -1053,6 +1283,7 @@ struct wx { dma_addr_t isb_dma; u32 *isb_mem; u32 isb_tag[WX_ISB_MAX]; + bool misc_irq_domain; #define WX_MAX_RETA_ENTRIES 128 #define WX_RSS_INDIR_TBL_MAX 64 @@ -1063,6 +1294,7 @@ struct wx { u32 wol; u16 bd_number; + bool default_up; struct wx_hw_stats stats; u64 tx_busy; @@ -1071,6 +1303,43 @@ struct wx { u64 hw_csum_rx_good; u64 hw_csum_rx_error; u64 alloc_rx_buff_failed; + unsigned int num_vfs; + 
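+	/* SR-IOV bookkeeping filled in by wx_sriov.c: per-VF state, the
+	 * PF-owned MACVLAN filter pool and a bitmap of the pools in use.
+	 */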
struct vf_data_storage *vfinfo; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + unsigned long fwd_bitmask; + + u32 atr_sample_rate; + void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype); + void (*configure_fdir)(struct wx *wx); + int (*setup_tc)(struct net_device *netdev, u8 tc); + void (*do_reset)(struct net_device *netdev); + int (*ptp_setup_sdp)(struct wx *wx); + + bool pps_enabled; + u64 pps_width; + u64 pps_edge_start; + u64 pps_edge_end; + u64 sec_to_cc; + u32 base_incval; + u32 tx_hwtstamp_pkts; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 tx_hwtstamp_errors; + u32 rx_hwtstamp_cleared; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + unsigned long ptp_tx_start; + seqlock_t hw_tc_lock; /* seqlock for ptp */ + struct cyclecounter hw_cc; + struct timecounter hw_tc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct kernel_hwtstamp_config tstamp_config; + struct sk_buff *ptp_tx_skb; + + struct timer_list service_timer; + struct work_struct service_task; }; #define WX_INTR_ALL (~0ULL) @@ -1115,6 +1384,24 @@ rd64(struct wx *wx, u32 reg) return (lsb | msb << 32); } +static inline u32 +rd32ptp(struct wx *wx, u32 reg) +{ + if (wx->mac.type == wx_mac_em) + return rd32(wx, reg); + + return rd32(wx, reg + 0xB500); +} + +static inline void +wr32ptp(struct wx *wx, u32 reg, u32 value) +{ + if (wx->mac.type == wx_mac_em) + return wr32(wx, reg, value); + + return wr32(wx, reg + 0xB500, value); +} + /* On some domestic CPU platforms, I/O is sometimes not synchronized with * memory flushing; use readl() here to flush PCI reads and writes. */ @@ -1131,4 +1418,19 @@ static inline struct wx *phylink_to_wx(struct phylink_config *config) return container_of(config, struct wx, phylink_config); } +static inline int wx_set_state_reset(struct wx *wx) +{ + u8 timeout = 50; + + while (test_and_set_bit(WX_STATE_RESETTING, wx->state)) { + timeout--; + if (!timeout) + return -EBUSY; + + usleep_range(1000, 2000); + } + + return 0; +} + #endif /* _WX_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 786a652ae64f..7e2d9ec38a30 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -37,9 +37,9 @@ static int ngbe_set_wol(struct net_device *netdev, wx->wol = 0; if (wol->wolopts & WAKE_MAGIC) wx->wol = WX_PSR_WKUP_CTL_MAG; - netdev->wol_enabled = !!(wx->wol); + netdev->ethtool->wol_enabled = !!(wx->wol); wr32(wx, WX_PSR_WKUP_CTL, wx->wol); - device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled); + device_set_wakeup_enable(&pdev->dev, netdev->ethtool->wol_enabled); return 0; } @@ -52,7 +52,7 @@ static int ngbe_set_ringparam(struct net_device *netdev, struct wx *wx = netdev_priv(netdev); u32 new_rx_count, new_tx_count; struct wx_ring *temp_ring; - int i; + int i, err = 0; new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); @@ -64,6 +64,10 @@ static int ngbe_set_ringparam(struct net_device *netdev, new_rx_count == wx->rx_ring_count) return 0; + err = wx_set_state_reset(wx); + if (err) + return err; + if (!netif_running(wx->netdev)) { for (i = 0; i < wx->num_tx_queues; i++) wx->tx_ring[i]->count = new_tx_count; @@ -72,14 +76,16 @@ static int ngbe_set_ringparam(struct net_device *netdev, wx->tx_ring_count = new_tx_count; wx->rx_ring_count = new_rx_count; - return 0; + goto clear_reset; } /* allocate
temporary buffer to store rings in */ i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); - if (!temp_ring) - return -ENOMEM; + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } ngbe_down(wx); @@ -89,7 +95,9 @@ static int ngbe_set_ringparam(struct net_device *netdev, wx_configure(wx); ngbe_up(wx); - return 0; +clear_reset: + clear_bit(WX_STATE_RESETTING, wx->state); + return err; } static int ngbe_set_channels(struct net_device *dev, @@ -130,6 +138,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .set_channels = ngbe_set_channels, .get_msglevel = wx_get_msglevel, .set_msglevel = wx_set_msglevel, + .get_ts_info = wx_get_ts_info, + .get_ts_stats = wx_get_ptp_stats, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index fdd6b4f70b7a..b5022c49dc5e 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -14,6 +14,9 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_hw.h" #include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" +#include "../libwx/wx_mbx.h" +#include "../libwx/wx_sriov.h" #include "ngbe_type.h" #include "ngbe_mdio.h" #include "ngbe_hw.h" @@ -128,6 +131,10 @@ static int ngbe_sw_init(struct wx *wx) wx->tx_work_limit = NGBE_DEFAULT_TX_WORK; wx->rx_work_limit = NGBE_DEFAULT_RX_WORK; + wx->mbx.size = WX_VXMAILBOX_SIZE; + wx->setup_tc = ngbe_setup_tc; + set_bit(0, &wx->fwd_bitmask); + return 0; } @@ -167,7 +174,7 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data) struct wx_q_vector *q_vector; struct wx *wx = data; struct pci_dev *pdev; - u32 eicr; + u32 eicr, eicr_misc; q_vector = wx->q_vector[0]; pdev = wx->pdev; @@ -185,6 +192,10 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data) if (!(pdev->msi_enabled)) wr32(wx, WX_PX_INTA, 1); + eicr_misc = wx_misc_isb(wx, WX_ISB_MISC); + if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC)) + wx_ptp_check_pps_event(wx); + wx->isb_mem[WX_ISB_MISC] = 0; /* would disable interrupts here but it is auto disabled */ napi_schedule_irqoff(&q_vector->napi); @@ -195,9 +206,13 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data) return IRQ_HANDLED; } -static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) +static irqreturn_t __ngbe_msix_misc(struct wx *wx, u32 eicr) { - struct wx *wx = data; + if (eicr & NGBE_PX_MISC_IC_VF_MBOX) + wx_msg_task(wx); + + if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC)) + wx_ptp_check_pps_event(wx); /* re-enable the original interrupt state, no lsc, no queues */ if (netif_running(wx->netdev)) @@ -206,6 +221,35 @@ static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) return IRQ_HANDLED; } +static irqreturn_t ngbe_msix_misc(int __always_unused irq, void *data) +{ + struct wx *wx = data; + u32 eicr; + + eicr = wx_misc_isb(wx, WX_ISB_MISC); + + return __ngbe_msix_misc(wx, eicr); +} + +static irqreturn_t ngbe_misc_and_queue(int __always_unused irq, void *data) +{ + struct wx_q_vector *q_vector; + struct wx *wx = data; + u32 eicr; + + eicr = wx_misc_isb(wx, WX_ISB_MISC); + if (!eicr) { + /* queue */ + q_vector = wx->q_vector[0]; + napi_schedule_irqoff(&q_vector->napi); + if (netif_running(wx->netdev)) + ngbe_irq_enable(wx, true); + return IRQ_HANDLED; + } + + return __ngbe_msix_misc(wx, eicr); +} + /** * ngbe_request_msix_irqs - Initialize MSI-X interrupts * @wx: board private structure @@ 
-238,8 +282,16 @@ static int ngbe_request_msix_irqs(struct wx *wx) } } - err = request_irq(wx->msix_entry->vector, - ngbe_msix_other, 0, netdev->name, wx); + /* Due to hardware design, when num_vfs < 7, the PF can use vector 0 for + * misc and vector 1 for the queue. But when num_vfs == 7, vector[1] is + * assigned to vf6. Misc and queue should reuse interrupt vector[0]. + */ + if (wx->num_vfs == 7) + err = request_irq(wx->msix_entry->vector, + ngbe_misc_and_queue, 0, netdev->name, wx); + else + err = request_irq(wx->msix_entry->vector, + ngbe_msix_misc, 0, netdev->name, wx); if (err) { wx_err(wx, "request_irq for msix_other failed: %d\n", err); @@ -291,6 +343,22 @@ static void ngbe_disable_device(struct wx *wx) struct net_device *netdev = wx->netdev; u32 i; + if (wx->num_vfs) { + /* Clear EITR Select mapping */ + wr32(wx, WX_PX_ITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0; i < wx->num_vfs; i++) + wx->vfinfo[i].clear_to_send = 0; + wx->notify_down = true; + /* ping all the active vfs to let them know we are going down */ + wx_ping_all_vfs_with_link_status(wx, false); + wx->notify_down = false; + + /* Disable all VFTE/VFRE TX/RX */ + wx_disable_vf_rx_tx(wx); + } + /* disable all enabled rx queues */ for (i = 0; i < wx->num_rx_queues; i++) /* this call also flushes the previous write */ @@ -313,10 +381,19 @@ static void ngbe_disable_device(struct wx *wx) wx_update_stats(wx); } +static void ngbe_reset(struct wx *wx) +{ + wx_flush_sw_mac_table(wx); + wx_mac_set_default_filter(wx, wx->mac.addr); + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset(wx); +} + void ngbe_down(struct wx *wx) { phylink_stop(wx->phylink); ngbe_disable_device(wx); + ngbe_reset(wx); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); } @@ -339,6 +416,11 @@ void ngbe_up(struct wx *wx) ngbe_sfp_modules_txrx_powerctl(wx, true); phylink_start(wx->phylink); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_PFRSTD, WX_CFG_PORT_CTL_PFRSTD); + if (wx->num_vfs) + wx_ping_all_vfs_with_link_status(wx, false); } /** @@ -379,6 +461,8 @@ static int ngbe_open(struct net_device *netdev) if (err) goto err_dis_phy; + wx_ptp_init(wx); + ngbe_up(wx); return 0; @@ -387,6 +471,7 @@ err_dis_phy: err_free_irq: wx_free_irq(wx); err_free_resources: + wx_free_isb_resources(wx); wx_free_resources(wx); return err; } @@ -406,8 +491,10 @@ static int ngbe_close(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); + wx_ptp_stop(wx); ngbe_down(wx); wx_free_irq(wx); + wx_free_isb_resources(wx); wx_free_resources(wx); phylink_disconnect_phy(wx->phylink); wx_control_hw(wx, false); @@ -499,11 +586,15 @@ static const struct net_device_ops ngbe_netdev_ops = { .ndo_start_xmit = wx_xmit_frame, .ndo_set_rx_mode = wx_set_rx_mode, .ndo_set_features = wx_set_features, + .ndo_fix_features = wx_fix_features, + .ndo_features_check = wx_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = wx_set_mac, .ndo_get_stats64 = wx_get_stats64, .ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid, + .ndo_hwtstamp_set = wx_hwtstamp_set, + .ndo_hwtstamp_get = wx_hwtstamp_get, }; /** @@ -575,6 +666,10 @@ static int ngbe_probe(struct pci_dev *pdev, goto err_pci_release_regions; } + /* The emerald supports up to 8 VFs per PF, but the physical + * function also needs one pool for basic networking.
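+	 * VFs are brought up through the standard sysfs interface, e.g.
+	 * "echo 7 > /sys/bus/pci/devices/<bdf>/sriov_numvfs" (the BDF is a
+	 * placeholder), which ends up in wx_pci_sriov_configure(), registered
+	 * as .sriov_configure further down.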
+ */ + pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT); wx->driver_name = ngbe_driver_name; ngbe_set_ethtool_ops(netdev); netdev->netdev_ops = &ngbe_netdev_ops; @@ -604,7 +699,7 @@ static int ngbe_probe(struct pci_dev *pdev, /* setup the private structure */ err = ngbe_sw_init(wx); if (err) - goto err_free_mac_table; + goto err_pci_release_regions; /* check if flash load is done after hw power up */ err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST); @@ -649,7 +744,7 @@ static int ngbe_probe(struct pci_dev *pdev, if (wx->wol_hw_supported) wx->wol = NGBE_PSR_WKUP_CTL_MAG; - netdev->wol_enabled = !!(wx->wol); + netdev->ethtool->wol_enabled = !!(wx->wol); wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol); device_set_wakeup_enable(&pdev->dev, wx->wol); @@ -698,6 +793,7 @@ err_register: err_clear_interrupt_scheme: wx_clear_interrupt_scheme(wx); err_free_mac_table: + kfree(wx->rss_key); kfree(wx->mac_table); err_pci_release_regions: pci_release_selected_regions(pdev, @@ -722,6 +818,7 @@ static void ngbe_remove(struct pci_dev *pdev) struct net_device *netdev; netdev = wx->netdev; + wx_disable_sriov(wx); unregister_netdev(netdev); phylink_destroy(wx->phylink); pci_release_selected_regions(pdev, @@ -781,6 +878,7 @@ static struct pci_driver ngbe_driver = { .suspend = ngbe_suspend, .resume = ngbe_resume, .shutdown = ngbe_shutdown, + .sriov_configure = wx_pci_sriov_configure, }; module_pci_driver(ngbe_driver); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index ec54b18c5fe7..c63bb6e6f405 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -7,7 +7,9 @@ #include <linux/phy.h> #include "../libwx/wx_type.h" +#include "../libwx/wx_ptp.h" #include "../libwx/wx_hw.h" +#include "../libwx/wx_sriov.h" #include "ngbe_type.h" #include "ngbe_mdio.h" @@ -64,6 +66,13 @@ static void ngbe_mac_config(struct phylink_config *config, unsigned int mode, static void ngbe_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { + struct wx *wx = phylink_to_wx(config); + + wx->speed = SPEED_UNKNOWN; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going down */ + wx_ping_all_vfs_with_link_status(wx, false); } static void ngbe_mac_link_up(struct phylink_config *config, @@ -103,6 +112,13 @@ static void ngbe_mac_link_up(struct phylink_config *config, wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); reg = rd32(wx, WX_MAC_WDG_TIMEOUT); wr32(wx, WX_MAC_WDG_TIMEOUT, reg); + + wx->speed = speed; + wx->last_rx_ptp_check = jiffies; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going up */ + wx_ping_all_vfs_with_link_status(wx, true); } static const struct phylink_mac_ops ngbe_mac_ops = { @@ -124,8 +140,12 @@ static int ngbe_phylink_init(struct wx *wx) MAC_SYM_PAUSE | MAC_ASYM_PAUSE; config->mac_managed_pm = true; - phy_mode = PHY_INTERFACE_MODE_RGMII_ID; - __set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces); + /* The MAC can only add the Tx delay and it can not be modified. + * So just disable the Tx delay in the PHY; this does not matter for the + * internal phy.
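+	 * (RGMII_RXID asks the PHY to insert only the Rx clock delay, so the
+	 * fixed MAC-side Tx delay plus the PHY-side Rx delay together give a
+	 * full RGMII-ID setup.)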
+ */ + phy_mode = PHY_INTERFACE_MODE_RGMII_RXID; + __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, config->supported_interfaces); phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops); if (IS_ERR(phylink)) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index f48ed7fc1805..bb74263f0498 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -70,15 +70,22 @@ /* Extended Interrupt Enable Set */ #define NGBE_PX_MISC_IEN_DEV_RST BIT(10) +#define NGBE_PX_MISC_IEN_TIMESYNC BIT(11) #define NGBE_PX_MISC_IEN_ETH_LK BIT(18) #define NGBE_PX_MISC_IEN_INT_ERR BIT(20) +#define NGBE_PX_MISC_IC_VF_MBOX BIT(23) #define NGBE_PX_MISC_IEN_GPIO BIT(26) #define NGBE_PX_MISC_IEN_MASK ( \ NGBE_PX_MISC_IEN_DEV_RST | \ + NGBE_PX_MISC_IEN_TIMESYNC | \ NGBE_PX_MISC_IEN_ETH_LK | \ NGBE_PX_MISC_IEN_INT_ERR | \ + NGBE_PX_MISC_IC_VF_MBOX | \ NGBE_PX_MISC_IEN_GPIO) +/* Extended Interrupt Cause Read */ +#define NGBE_PX_MISC_IC_TIMESYNC BIT(11) /* time sync */ + #define NGBE_INTR_ALL 0x1FF #define NGBE_INTR_MISC BIT(0) @@ -129,6 +136,7 @@ #define NGBE_MAX_RXD 8192 #define NGBE_MIN_RXD 128 +#define NGBE_MAX_VFS_DRV_LIMIT 7 extern char ngbe_driver_name[]; void ngbe_down(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile index 42718875277c..c757fa95e58e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/Makefile +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -10,4 +10,6 @@ txgbe-objs := txgbe_main.o \ txgbe_hw.o \ txgbe_phy.o \ txgbe_irq.o \ - txgbe_ethtool.o + txgbe_fdir.o \ + txgbe_ethtool.o \ + txgbe_aml.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c new file mode 100644 index 000000000000..7dbcf41750c1 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/phylink.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/phy.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" +#include "../libwx/wx_hw.h" +#include "../libwx/wx_sriov.h" +#include "txgbe_type.h" +#include "txgbe_aml.h" +#include "txgbe_hw.h" + +void txgbe_gpio_init_aml(struct wx *wx) +{ + u32 status; + + wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3); + wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3); + + status = rd32(wx, WX_GPIO_INTSTATUS); + for (int i = 0; i < 6; i++) { + if (status & BIT(i)) + wr32(wx, WX_GPIO_EOI, BIT(i)); + } +} + +irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data) +{ + struct txgbe *txgbe = data; + struct wx *wx = txgbe->wx; + u32 status; + + wr32(wx, WX_GPIO_INTMASK, 0xFF); + status = rd32(wx, WX_GPIO_INTSTATUS); + if (status & TXGBE_GPIOBIT_2) { + set_bit(WX_FLAG_NEED_SFP_RESET, wx->flags); + wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_2); + wx_service_event_schedule(wx); + } + if (status & TXGBE_GPIOBIT_3) { + set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); + wx_service_event_schedule(wx); + wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_3); + } + + wr32(wx, WX_GPIO_INTMASK, 0); + return IRQ_HANDLED; +} + +int txgbe_test_hostif(struct wx *wx) +{ + struct txgbe_hic_ephy_getlink buffer; + + if (wx->mac.type != wx_mac_aml) + return 0; + + buffer.hdr.cmd = FW_PHY_GET_LINK_CMD; + buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_getlink) - + sizeof(struct wx_hic_hdr); + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), + WX_HI_COMMAND_TIMEOUT, true); +} + +static int txgbe_identify_sfp_hostif(struct wx *wx, struct txgbe_hic_i2c_read *buffer) +{ + buffer->hdr.cmd = FW_READ_SFP_INFO_CMD; + buffer->hdr.buf_len = sizeof(struct txgbe_hic_i2c_read) - + sizeof(struct wx_hic_hdr); + buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + return wx_host_interface_command(wx, (u32 *)buffer, + sizeof(struct txgbe_hic_i2c_read), + WX_HI_COMMAND_TIMEOUT, true); +} + +static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int duplex) +{ + struct txgbe_hic_ephy_setlink buffer; + + buffer.hdr.cmd = FW_PHY_SET_LINK_CMD; + buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_setlink) - + sizeof(struct wx_hic_hdr); + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + switch (speed) { + case SPEED_25000: + buffer.speed = TXGBE_LINK_SPEED_25GB_FULL; + break; + case SPEED_10000: + buffer.speed = TXGBE_LINK_SPEED_10GB_FULL; + break; + } + + buffer.fec_mode = TXGBE_PHY_FEC_AUTO; + buffer.autoneg = autoneg; + buffer.duplex = duplex; + + return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), + WX_HI_COMMAND_TIMEOUT, true); +} + +static void txgbe_get_link_capabilities(struct wx *wx) +{ + struct txgbe *txgbe = wx->priv; + + if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->sfp_interfaces)) + wx->adv_speed = SPEED_25000; + else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->sfp_interfaces)) + wx->adv_speed = SPEED_10000; + else + wx->adv_speed = SPEED_UNKNOWN; + + wx->adv_duplex = wx->adv_speed == SPEED_UNKNOWN ? 
+ DUPLEX_HALF : DUPLEX_FULL; +} + +static void txgbe_get_phy_link(struct wx *wx, int *speed) +{ + u32 status; + + status = rd32(wx, TXGBE_CFG_PORT_ST); + if (!(status & TXGBE_CFG_PORT_ST_LINK_UP)) + *speed = SPEED_UNKNOWN; + else if (status & TXGBE_CFG_PORT_ST_LINK_AML_25G) + *speed = SPEED_25000; + else if (status & TXGBE_CFG_PORT_ST_LINK_AML_10G) + *speed = SPEED_10000; + else + *speed = SPEED_UNKNOWN; +} + +int txgbe_set_phy_link(struct wx *wx) +{ + int speed, err; + u32 gpio; + + /* Check RX signal */ + gpio = rd32(wx, WX_GPIO_EXT); + if (gpio & TXGBE_GPIOBIT_3) + return -ENODEV; + + txgbe_get_link_capabilities(wx); + if (wx->adv_speed == SPEED_UNKNOWN) + return -ENODEV; + + txgbe_get_phy_link(wx, &speed); + if (speed == wx->adv_speed) + return 0; + + err = txgbe_set_phy_link_hostif(wx, wx->adv_speed, 0, wx->adv_duplex); + if (err) { + wx_err(wx, "Failed to setup link\n"); + return err; + } + + return 0; +} + +static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, }; + DECLARE_PHY_INTERFACE_MASK(interfaces); + struct txgbe *txgbe = wx->priv; + + if (id->com_25g_code & (TXGBE_SFF_25GBASESR_CAPABLE | + TXGBE_SFF_25GBASEER_CAPABLE | + TXGBE_SFF_25GBASELR_CAPABLE)) { + phylink_set(modes, 25000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); + } + if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) { + phylink_set(modes, 10000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) { + phylink_set(modes, 10000baseLR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + + if (phy_interface_empty(interfaces)) { + wx_err(wx, "unsupported SFP module\n"); + return -EINVAL; + } + + phylink_set(modes, Pause); + phylink_set(modes, Asym_Pause); + phylink_set(modes, FIBRE); + txgbe->link_port = PORT_FIBRE; + + if (!linkmode_equal(txgbe->sfp_support, modes)) { + linkmode_copy(txgbe->sfp_support, modes); + phy_interface_and(txgbe->sfp_interfaces, + wx->phylink_config.supported_interfaces, + interfaces); + linkmode_copy(txgbe->advertising, modes); + + set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); + } + + return 0; +} + +int txgbe_identify_sfp(struct wx *wx) +{ + struct txgbe_hic_i2c_read buffer; + struct txgbe_sfp_id *id; + int err = 0; + u32 gpio; + + gpio = rd32(wx, WX_GPIO_EXT); + if (gpio & TXGBE_GPIOBIT_2) + return -ENODEV; + + err = txgbe_identify_sfp_hostif(wx, &buffer); + if (err) { + wx_err(wx, "Failed to identify SFP module\n"); + return err; + } + + id = &buffer.id; + if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP) { + wx_err(wx, "Invalid SFP module\n"); + return -ENODEV; + } + + err = txgbe_sfp_to_linkmodes(wx, id); + if (err) + return err; + + if (gpio & TXGBE_GPIOBIT_3) + set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); + + return 0; +} + +void txgbe_setup_link(struct wx *wx) +{ + struct txgbe *txgbe = wx->priv; + + phy_interface_zero(txgbe->sfp_interfaces); + linkmode_zero(txgbe->sfp_support); + + txgbe_identify_sfp(wx); +} + +static void txgbe_get_link_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct wx *wx = phylink_to_wx(config); + int speed; + + txgbe_get_phy_link(wx, &speed); + state->link = speed != SPEED_UNKNOWN; + state->speed = speed; + state->duplex = state->link ? 
DUPLEX_FULL : DUPLEX_UNKNOWN; +} + +static void txgbe_reconfig_mac(struct wx *wx) +{ + u32 wdg, fc; + + wdg = rd32(wx, WX_MAC_WDG_TIMEOUT); + fc = rd32(wx, WX_MAC_RX_FLOW_CTRL); + + wr32(wx, WX_MIS_RST, TXGBE_MIS_RST_MAC_RST(wx->bus.func)); + /* wait for MAC reset complete */ + usleep_range(1000, 1500); + + wr32m(wx, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_CTL_LINK_STS_MOD, + TXGBE_MAC_MISC_CTL_LINK_BOTH); + wx_reset_mac(wx); + + wr32(wx, WX_MAC_WDG_TIMEOUT, wdg); + wr32(wx, WX_MAC_RX_FLOW_CTRL, fc); +} + +static void txgbe_mac_link_up_aml(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct wx *wx = phylink_to_wx(config); + u32 txcfg; + + wx_fc_enable(wx, tx_pause, rx_pause); + + txgbe_reconfig_mac(wx); + + txcfg = rd32(wx, TXGBE_AML_MAC_TX_CFG); + txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK; + + switch (speed) { + case SPEED_25000: + txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G; + break; + case SPEED_10000: + txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_10G; + break; + default: + break; + } + + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + wr32(wx, TXGBE_AML_MAC_TX_CFG, txcfg | TXGBE_AML_MAC_TX_CFG_TE); + + wx->speed = speed; + wx->last_rx_ptp_check = jiffies; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going up */ + wx_ping_all_vfs_with_link_status(wx, true); +} + +static void txgbe_mac_link_down_aml(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct wx *wx = phylink_to_wx(config); + + wr32m(wx, TXGBE_AML_MAC_TX_CFG, TXGBE_AML_MAC_TX_CFG_TE, 0); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); + + wx->speed = SPEED_UNKNOWN; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going down */ + wx_ping_all_vfs_with_link_status(wx, false); +} + +static void txgbe_mac_config_aml(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static const struct phylink_mac_ops txgbe_mac_ops_aml = { + .mac_config = txgbe_mac_config_aml, + .mac_link_down = txgbe_mac_link_down_aml, + .mac_link_up = txgbe_mac_link_up_aml, +}; + +int txgbe_phylink_init_aml(struct txgbe *txgbe) +{ + struct phylink_link_state state; + struct phylink_config *config; + struct wx *wx = txgbe->wx; + phy_interface_t phy_mode; + struct phylink *phylink; + int err; + + config = &wx->phylink_config; + config->dev = &wx->netdev->dev; + config->type = PHYLINK_NETDEV; + config->mac_capabilities = MAC_25000FD | MAC_10000FD | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + config->get_fixed_state = txgbe_get_link_state; + + phy_mode = PHY_INTERFACE_MODE_25GBASER; + __set_bit(PHY_INTERFACE_MODE_25GBASER, config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces); + + phylink = phylink_create(config, NULL, phy_mode, &txgbe_mac_ops_aml); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + state.speed = SPEED_25000; + state.duplex = DUPLEX_FULL; + err = phylink_set_fixed_link(phylink, &state); + if (err) { + wx_err(wx, "Failed to set fixed link\n"); + return err; + } + + wx->phylink = phylink; + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h new file mode 100644 index 000000000000..25d4971ca0d9 --- /dev/null +++ 
b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_AML_H_ +#define _TXGBE_AML_H_ + +void txgbe_gpio_init_aml(struct wx *wx); +irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data); +int txgbe_test_hostif(struct wx *wx); +int txgbe_set_phy_link(struct wx *wx); +int txgbe_identify_sfp(struct wx *wx); +void txgbe_setup_link(struct wx *wx); +int txgbe_phylink_init_aml(struct txgbe *txgbe); + +#endif /* _TXGBE_AML_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index db675512ce4d..a4753402660e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -9,8 +9,34 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" #include "txgbe_type.h" +#include "txgbe_fdir.h" #include "txgbe_ethtool.h" +int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + struct txgbe *txgbe = wx->priv; + int err; + + if (wx->mac.type == wx_mac_aml40) + return -EOPNOTSUPP; + + err = wx_get_link_ksettings(netdev, cmd); + if (err) + return err; + + if (wx->mac.type == wx_mac_sp) + return 0; + + cmd->base.port = txgbe->link_port; + cmd->base.autoneg = AUTONEG_DISABLE; + linkmode_copy(cmd->link_modes.supported, txgbe->sfp_support); + linkmode_copy(cmd->link_modes.advertising, txgbe->advertising); + + return 0; +} + static int txgbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, @@ -19,7 +45,7 @@ static int txgbe_set_ringparam(struct net_device *netdev, struct wx *wx = netdev_priv(netdev); u32 new_rx_count, new_tx_count; struct wx_ring *temp_ring; - int i; + int i, err = 0; new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); @@ -31,6 +57,10 @@ static int txgbe_set_ringparam(struct net_device *netdev, new_rx_count == wx->rx_ring_count) return 0; + err = wx_set_state_reset(wx); + if (err) + return err; + if (!netif_running(wx->netdev)) { for (i = 0; i < wx->num_tx_queues; i++) wx->tx_ring[i]->count = new_tx_count; @@ -39,14 +69,16 @@ static int txgbe_set_ringparam(struct net_device *netdev, wx->tx_ring_count = new_tx_count; wx->rx_ring_count = new_rx_count; - return 0; + goto clear_reset; } /* allocate temporary buffer to store rings in */ i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); - if (!temp_ring) - return -ENOMEM; + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } txgbe_down(wx); @@ -55,7 +87,9 @@ static int txgbe_set_ringparam(struct net_device *netdev, txgbe_up(wx); - return 0; +clear_reset: + clear_bit(WX_STATE_RESETTING, wx->state); + return err; } static int txgbe_set_channels(struct net_device *dev, @@ -71,13 +105,444 @@ static int txgbe_set_channels(struct net_device *dev, return txgbe_setup_tc(dev, netdev_get_num_tc(dev)); } +static int txgbe_get_ethtool_fdir_entry(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + union txgbe_atr_input *mask = &txgbe->fdir_mask; + struct txgbe_fdir_filter *rule = NULL; + struct hlist_node *node; + + /* report total rule count */ + cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2; + + 
hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list, + fdir_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto = 0; + break; + default: + return -EINVAL; + } + + fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; + fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; + fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; + fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); + fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); + fsp->flow_type |= FLOW_EXT; + + /* record action */ + if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->action; + + return 0; +} + +static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct txgbe_fdir_filter *rule; + struct hlist_node *node; + int cnt = 0; + + /* report total rule count */ + cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2; + + hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list, + fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct wx *wx = netdev_priv(dev); + struct txgbe *txgbe = wx->priv; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = wx->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = txgbe->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = txgbe_get_ethtool_fdir_entry(txgbe, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = txgbe_get_ethtool_fdir_all(txgbe, cmd, (u32 *)rule_locs); + break; + default: + break; + } + + return ret; +} + +static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, + u8 *flow_type) +{ + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + if (!fsp->m_u.usr_ip4_spec.proto) { + *flow_type = TXGBE_ATR_FLOW_TYPE_IPV4; + break; + } + fallthrough; + default: + return -EINVAL; + } + 
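+		/* These flow types map one-to-one from ethtool ntuple rules,
+		 * e.g. (device name and values are placeholders):
+		 *	ethtool -N eth0 flow-type tcp4 dst-port 80 action 2
+		 * selects TXGBE_ATR_FLOW_TYPE_TCPV4 above.
+		 */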
break; + default: + return -EINVAL; + } + + return 0; +} + +static bool txgbe_match_ethtool_fdir_entry(struct txgbe *txgbe, + struct txgbe_fdir_filter *input) +{ + struct txgbe_fdir_filter *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, &txgbe->fdir_filter_list, + fdir_node) { + if (rule->filter.formatted.bkt_hash == + input->filter.formatted.bkt_hash && + rule->action == input->action) { + wx_dbg(txgbe->wx, "FDIR entry already exists\n"); + return true; + } + } + return false; +} + +static int txgbe_update_ethtool_fdir_entry(struct txgbe *txgbe, + struct txgbe_fdir_filter *input, + u16 sw_idx) +{ + struct hlist_node *node = NULL, *parent = NULL; + struct txgbe_fdir_filter *rule; + struct wx *wx = txgbe->wx; + bool deleted = false; + int err; + + hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list, + fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = node; + } + + /* if there is an old rule occupying our place, remove it */ + if (rule && rule->sw_idx == sw_idx) { + /* hardware filters are only configured when interface is up, + * and we should not issue filter commands while the interface + * is down + */ + if (netif_running(wx->netdev) && + (!input || rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash)) { + err = txgbe_fdir_erase_perfect_filter(wx, + &rule->filter, + sw_idx); + if (err) + return -EINVAL; + } + + hlist_del(&rule->fdir_node); + kfree(rule); + txgbe->fdir_filter_count--; + deleted = true; + } + + /* If we weren't given an input, then this was a request to delete a + * filter. We should return -EINVAL if the filter wasn't found, but + * return 0 if the rule was successfully deleted. + */ + if (!input) + return deleted ? 
0 : -EINVAL; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, parent); + else + hlist_add_head(&input->fdir_node, + &txgbe->fdir_filter_list); + + /* update counts */ + txgbe->fdir_filter_count++; + + return 0; +} + +static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct txgbe_fdir_filter *input; + union txgbe_atr_input mask; + struct wx *wx = txgbe->wx; + int err = -EINVAL; + u16 ptype = 0; + u8 queue; + + if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))) + return -EOPNOTSUPP; + + /* ring_cookie is masked into a set of queues and txgbe pools, or + * we use the drop index + */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + queue = TXGBE_RDB_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + + if (!vf && ring >= wx->num_rx_queues) + return -EINVAL; + else if (vf && (vf > wx->num_vfs || + ring >= wx->num_rx_queues_per_pool)) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = wx->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * wx->num_rx_queues_per_pool) + ring; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= ((1024 << TXGBE_FDIR_PBALLOC_64K) - 2)) { + wx_err(wx, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + memset(&mask, 0, sizeof(union txgbe_atr_input)); + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (txgbe_flowspec_to_flow_type(fsp, + &input->filter.formatted.flow_type)) { + wx_err(wx, "Unrecognized flow type\n"); + goto err_out; + } + + mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK | + TXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK; + + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + + if (fsp->flow_type & FLOW_EXT) { + input->filter.formatted.vm_pool = + (unsigned char)ntohl(fsp->h_ext.data[1]); + mask.formatted.vm_pool = + (unsigned char)ntohl(fsp->m_ext.data[1]); + input->filter.formatted.flex_bytes = + fsp->h_ext.vlan_etype; + mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; + } + + switch (input->filter.formatted.flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + ptype = WX_PTYPE_L2_IPV4_TCP; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV4: + ptype = WX_PTYPE_L2_IPV4_UDP; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + ptype = WX_PTYPE_L2_IPV4_SCTP; + break; + case TXGBE_ATR_FLOW_TYPE_IPV4: + ptype = WX_PTYPE_L2_IPV4; + break; + default: + break; + } + + input->filter.formatted.vlan_id = htons(ptype); + if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) + mask.formatted.vlan_id = htons(0xFFFF); + else + mask.formatted.vlan_id = 
htons(0xFFF8); + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = TXGBE_RDB_FDIR_DROP_QUEUE; + else + input->action = fsp->ring_cookie; + + spin_lock(&txgbe->fdir_perfect_lock); + + if (hlist_empty(&txgbe->fdir_filter_list)) { + /* save mask and program input mask into HW */ + memcpy(&txgbe->fdir_mask, &mask, sizeof(mask)); + err = txgbe_fdir_set_input_mask(wx, &mask); + if (err) + goto err_unlock; + } else if (memcmp(&txgbe->fdir_mask, &mask, sizeof(mask))) { + wx_err(wx, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n"); + goto err_unlock; + } + + /* apply mask and compute/store hash */ + txgbe_atr_compute_perfect_hash(&input->filter, &mask); + + /* check that the new entry does not already exist in the filter list */ + if (txgbe_match_ethtool_fdir_entry(txgbe, input)) + goto err_unlock; + + /* only program filters to hardware if the net device is running, as + * we store the filters in the Rx buffer which is not allocated when + * the device is down + */ + if (netif_running(wx->netdev)) { + err = txgbe_fdir_write_perfect_filter(wx, &input->filter, + input->sw_idx, queue); + if (err) + goto err_unlock; + } + + txgbe_update_ethtool_fdir_entry(txgbe, input, input->sw_idx); + + spin_unlock(&txgbe->fdir_perfect_lock); + + return 0; +err_unlock: + spin_unlock(&txgbe->fdir_perfect_lock); +err_out: + kfree(input); + return err; +} + +static int txgbe_del_ethtool_fdir_entry(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err = 0; + + spin_lock(&txgbe->fdir_perfect_lock); + err = txgbe_update_ethtool_fdir_entry(txgbe, NULL, fsp->location); + spin_unlock(&txgbe->fdir_perfect_lock); + + return err; +} + +static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct wx *wx = netdev_priv(dev); + struct txgbe *txgbe = wx->priv; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = txgbe_add_ethtool_fdir_entry(txgbe, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = txgbe_del_ethtool_fdir_entry(txgbe, cmd); + break; + default: + break; + } + + return ret; +} + static const struct ethtool_ops txgbe_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_drvinfo = wx_get_drvinfo, .nway_reset = wx_nway_reset, .get_link = ethtool_op_get_link, - .get_link_ksettings = wx_get_link_ksettings, + .get_link_ksettings = txgbe_get_link_ksettings, .set_link_ksettings = wx_set_link_ksettings, .get_sset_count = wx_get_sset_count, .get_strings = wx_get_strings, @@ -92,8 +557,12 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .set_coalesce = wx_set_coalesce, .get_channels = wx_get_channels, .set_channels = txgbe_set_channels, + .get_rxnfc = txgbe_get_rxnfc, + .set_rxnfc = txgbe_set_rxnfc, .get_msglevel = wx_get_msglevel, .set_msglevel = wx_set_msglevel, + .get_ts_info = wx_get_ts_info, + .get_ts_stats = wx_get_ptp_stats, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h index ace1b3571012..66dbc8ec1bb6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h @@ -4,6 +4,8 @@ #ifndef _TXGBE_ETHTOOL_H_ #define _TXGBE_ETHTOOL_H_ +int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd); 
void txgbe_set_ethtool_ops(struct net_device *netdev); #endif /* _TXGBE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c new file mode 100644 index 000000000000..a84010828551 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/string.h> +#include <linux/types.h> +#include <linux/pci.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_hw.h" +#include "txgbe_type.h" +#include "txgbe_fdir.h" + +/* These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out TXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define TXGBE_ATR_COMMON_HASH_KEY \ + (TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY) +#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0) + +/** + * txgbe_atr_compute_sig_hash - Compute the signature hash + * @input: input bitstream to compute the hash on + * @common: compressed common input dword + * @hash: pointer to the computed hash + * + * This function contains several optimizations, such as unwinding all of + * the loops, letting the compiler work out all of the conditional ifs since + * the keys are static defines, and computing two keys at once since the + * hashed dword stream will be the same for both keys. 
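+ * + * Each iteration n tests bit n of each key against lo_hash_dword and bit + * (n + 16) against hi_hash_dword: a set bucket or common key bit folds in + * the dword right-shifted by n, while a signature key bit folds it into + * the upper half via a left shift of (16 - n). Bits set in both keys are + * accumulated once in common_hash and mixed into both results at the end.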
+ **/ +static void txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common, + u32 *hash) +{ + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 i; + + /* record the flow_vm_vlan bits as they are a key part of the hash */ + flow_vm_vlan = ntohl(input.dword); + + /* generate common hash dword */ + hi_hash_dword = ntohl(common.dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + TXGBE_COMPUTE_SIG_HASH_ITERATION(0); + + /* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed, + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process the remaining 30 bits of the key */ + for (i = 1; i <= 15; i++) + TXGBE_COMPUTE_SIG_HASH_ITERATION(i); + + /* combine common_hash result with signature and bucket hashes */ + bucket_hash ^= common_hash; + bucket_hash &= TXGBE_ATR_HASH_MASK; + + sig_hash ^= common_hash << 16; + sig_hash &= TXGBE_ATR_HASH_MASK << 16; + + /* return completed signature hash */ + *hash = sig_hash ^ bucket_hash; +} + +#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0) + +/** + * txgbe_atr_compute_perfect_hash - Compute the perfect filter hash + * @input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * + * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. + **/ +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *input_mask) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0; + __be32 hi_dword = 0; + u32 i = 0; + + /* Apply masks to input data */ + for (i = 0; i < 11; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; + + /* record the flow_vm_vlan bits as they are a key part of the hash */ + flow_vm_vlan = ntohl(input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 10; i++) + hi_dword ^= input->dword_stream[i]; + hi_hash_dword = ntohl(hi_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + TXGBE_COMPUTE_BKT_HASH_ITERATION(0); + + /* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed, + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process the remaining 30 bits of the key */ + for (i = 1; i <= 15; i++) + TXGBE_COMPUTE_BKT_HASH_ITERATION(i); + + /* Limit hash to 13 bits since max bucket count is 8K. 
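+ * Masking with 0x1FFF keeps exactly those 13 bits.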
+ * Store result at the end of the input stream. + */ + input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF); +} + +static int txgbe_fdir_check_cmd_complete(struct wx *wx) +{ + u32 val; + + return read_poll_timeout_atomic(rd32, val, + !(val & TXGBE_RDB_FDIR_CMD_CMD_MASK), + 10, 100, false, + wx, TXGBE_RDB_FDIR_CMD); +} + +/** + * txgbe_fdir_add_signature_filter - Adds a signature hash filter + * @wx: pointer to hardware structure + * @input: unique input dword + * @common: compressed common input dword + * @queue: queue index to direct traffic to + * + * @return: 0 on success and negative on failure + **/ +static int txgbe_fdir_add_signature_filter(struct wx *wx, + union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common, + u8 queue) +{ + u32 fdirhashcmd, fdircmd; + u8 flow_type; + int err; + + /* Get the flow_type in order to program FDIRCMD properly + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6, + * fifth is FDIRCMD.TUNNEL_FILTER + */ + flow_type = input.formatted.flow_type; + switch (flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + case TXGBE_ATR_FLOW_TYPE_UDPV4: + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + case TXGBE_ATR_FLOW_TYPE_TCPV6: + case TXGBE_ATR_FLOW_TYPE_UDPV6: + case TXGBE_ATR_FLOW_TYPE_SCTPV6: + break; + default: + wx_err(wx, "Error on flow type input\n"); + return -EINVAL; + } + + /* configure FDIRCMD register */ + fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW | + TXGBE_RDB_FDIR_CMD_FILTER_UPDATE | + TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN; + fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(flow_type); + fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue); + + txgbe_atr_compute_sig_hash(input, common, &fdirhashcmd); + fdirhashcmd |= TXGBE_RDB_FDIR_HASH_BUCKET_VALID; + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhashcmd); + wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd); + + wx_dbg(wx, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + + err = txgbe_fdir_check_cmd_complete(wx); + if (err) + wx_err(wx, "Flow Director command did not complete!\n"); + + return err; +} + +void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype) +{ + union txgbe_atr_hash_dword common = { .dword = 0 }; + union txgbe_atr_hash_dword input = { .dword = 0 }; + struct wx_q_vector *q_vector = ring->q_vector; + struct wx_dec_ptype dptype; + union network_header { + struct ipv6hdr *ipv6; + struct iphdr *ipv4; + void *raw; + } hdr; + struct tcphdr *th; + + /* if ring doesn't have an interrupt vector, cannot perform ATR */ + if (!q_vector) + return; + + ring->atr_count++; + dptype = wx_decode_ptype(ptype); + if (dptype.etype) { + if (WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP) + return; + hdr.raw = (void *)skb_inner_network_header(first->skb); + th = inner_tcp_hdr(first->skb); + } else { + if (WX_PTYPE_PKT(ptype) != WX_PTYPE_PKT_IP || + WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP) + return; + hdr.raw = (void *)skb_network_header(first->skb); + th = tcp_hdr(first->skb); + } + + /* skip this packet since it is invalid or the socket is closing */ + if (!th || th->fin) + return; + + /* sample on all syn packets or once every atr sample count */ + if (!th->syn && ring->atr_count < ring->atr_sample_rate) + return; + + /* reset sample count */ + ring->atr_count = 0; + + /* src and dst are inverted, think how the receiver sees them + * + * The input is broken into two sections, a non-compressed section + * containing vm_pool, vlan_id, and flow_type. The rest of the data + * is XORed together and stored in the compressed dword. 
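+ * + * Note that vlan_id is reused below to carry the packet type, the same + * encoding the perfect-filter path programs into this field.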
+ */ + input.formatted.vlan_id = htons((u16)ptype); + + /* since src port and flex bytes occupy the same word, XOR them together + * and write the value to the source port portion of the compressed dword + */ + if (first->tx_flags & WX_TX_FLAGS_SW_VLAN) + common.port.src ^= th->dest ^ first->skb->protocol; + else if (first->tx_flags & WX_TX_FLAGS_HW_VLAN) + common.port.src ^= th->dest ^ first->skb->vlan_proto; + else + common.port.src ^= th->dest ^ first->protocol; + common.port.dst ^= th->source; + + if (WX_PTYPE_PKT_IPV6 & WX_PTYPE_PKT(ptype)) { + input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + } else { + input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + } + + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ + txgbe_fdir_add_signature_filter(q_vector->wx, input, common, + ring->queue_index); +} + +int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask) +{ + u32 fdirm = 0, fdirtcpm = 0, flex = 0; + int index, offset; + + /* Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + * + * This also assumes IPv4 only. IPv6 masking isn't supported at this + * point in time. + */ + + /* verify bucket hash is cleared on hash generation */ + if (input_mask->formatted.bkt_hash) + wx_dbg(wx, "bucket hash should always be 0 in mask\n"); + + /* Program FDIRM and verify partial masks */ + switch (input_mask->formatted.vm_pool & 0x7F) { + case 0x0: + fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_POOL; + break; + case 0x7F: + break; + default: + wx_err(wx, "Error on vm pool mask\n"); + return -EINVAL; + } + + switch (input_mask->formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) { + case 0x0: + fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_L4P; + if (input_mask->formatted.dst_port || + input_mask->formatted.src_port) { + wx_err(wx, "Error on src/dst port mask\n"); + return -EINVAL; + } + break; + case TXGBE_ATR_L4TYPE_MASK: + break; + default: + wx_err(wx, "Error on flow type mask\n"); + return -EINVAL; + } + + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ + wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm); + + index = VMDQ_P(0) / 4; + offset = VMDQ_P(0) % 4; + flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index)); + flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8)); + flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | + TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8); + + switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: + /* Mask Flex Bytes */ + flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK << (offset * 8); + break; + case 0xFFFF: + break; + default: + wx_err(wx, "Error on flexible byte mask\n"); + return -EINVAL; + } + wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex); + + /* store the TCP/UDP port masks, bit reversed from port layout */ + fdirtcpm = ntohs(input_mask->formatted.dst_port); + fdirtcpm <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT; + fdirtcpm |= ntohs(input_mask->formatted.src_port); + + /* write both the same so that UDP and TCP use the same mask */ + wr32(wx, TXGBE_RDB_FDIR_TCP_MSK, ~fdirtcpm); + wr32(wx, TXGBE_RDB_FDIR_UDP_MSK, 
~fdirtcpm); + wr32(wx, TXGBE_RDB_FDIR_SCTP_MSK, ~fdirtcpm); + + /* store source and destination IP masks (little-endian) */ + wr32(wx, TXGBE_RDB_FDIR_SA4_MSK, + ntohl(~input_mask->formatted.src_ip[0])); + wr32(wx, TXGBE_RDB_FDIR_DA4_MSK, + ntohl(~input_mask->formatted.dst_ip[0])); + + return 0; +} + +int txgbe_fdir_write_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id, u8 queue) +{ + u32 fdirport, fdirvlan, fdirhash, fdircmd; + int err = 0; + + /* currently IPv6 is not supported, must be programmed with 0 */ + wr32(wx, TXGBE_RDB_FDIR_IP6(2), ntohl(input->formatted.src_ip[0])); + wr32(wx, TXGBE_RDB_FDIR_IP6(1), ntohl(input->formatted.src_ip[1])); + wr32(wx, TXGBE_RDB_FDIR_IP6(0), ntohl(input->formatted.src_ip[2])); + + /* record the source address (little-endian) */ + wr32(wx, TXGBE_RDB_FDIR_SA, ntohl(input->formatted.src_ip[0])); + + /* record the first 32 bits of the destination address + * (little-endian) + */ + wr32(wx, TXGBE_RDB_FDIR_DA, ntohl(input->formatted.dst_ip[0])); + + /* record source and destination port (little-endian) */ + fdirport = ntohs(input->formatted.dst_port); + fdirport <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT; + fdirport |= ntohs(input->formatted.src_port); + wr32(wx, TXGBE_RDB_FDIR_PORT, fdirport); + + /* record packet type and flex_bytes (little-endian) */ + fdirvlan = ntohs(input->formatted.flex_bytes); + fdirvlan <<= TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT; + fdirvlan |= ntohs(input->formatted.vlan_id); + wr32(wx, TXGBE_RDB_FDIR_FLEX, fdirvlan); + + /* configure FDIRHASH register */ + fdirhash = (__force u32)input->formatted.bkt_hash | + TXGBE_RDB_FDIR_HASH_BUCKET_VALID | + TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id); + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash); + + /* flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + WX_WRITE_FLUSH(wx); + + /* configure FDIRCMD register */ + fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW | + TXGBE_RDB_FDIR_CMD_FILTER_UPDATE | + TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN; + if (queue == TXGBE_RDB_FDIR_DROP_QUEUE) + fdircmd |= TXGBE_RDB_FDIR_CMD_DROP; + fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(input->formatted.flow_type); + fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue); + fdircmd |= TXGBE_RDB_FDIR_CMD_VT_POOL(input->formatted.vm_pool); + + wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd); + err = txgbe_fdir_check_cmd_complete(wx); + if (err) + wx_err(wx, "Flow Director command did not complete!\n"); + + return err; +} + +int txgbe_fdir_erase_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id) +{ + u32 fdirhash, fdircmd; + int err = 0; + + /* configure FDIRHASH register */ + fdirhash = (__force u32)input->formatted.bkt_hash; + fdirhash |= TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id); + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash); + + /* flush hash to HW */ + WX_WRITE_FLUSH(wx); + + /* Query if filter is present */ + wr32(wx, TXGBE_RDB_FDIR_CMD, TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT); + + err = txgbe_fdir_check_cmd_complete(wx); + if (err) { + wx_err(wx, "Flow Director command did not complete!\n"); + return err; + } + + fdircmd = rd32(wx, TXGBE_RDB_FDIR_CMD); + /* if filter exists in hardware then remove it */ + if (fdircmd & TXGBE_RDB_FDIR_CMD_FILTER_VALID) { + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash); + WX_WRITE_FLUSH(wx); + wr32(wx, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW); + } + + return 0; +} + +/** + * txgbe_fdir_enable - Initialize Flow Director control registers + * @wx: pointer to hardware structure + * @fdirctrl: value to 
write to flow director control register + **/ +static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl) +{ + u32 val; + int ret; + + /* Prime the keys for hashing */ + wr32(wx, TXGBE_RDB_FDIR_HKEY, TXGBE_ATR_BUCKET_HASH_KEY); + wr32(wx, TXGBE_RDB_FDIR_SKEY, TXGBE_ATR_SIGNATURE_HASH_KEY); + + wr32(wx, TXGBE_RDB_FDIR_CTL, fdirctrl); + WX_WRITE_FLUSH(wx); + ret = read_poll_timeout(rd32, val, val & TXGBE_RDB_FDIR_CTL_INIT_DONE, + 1000, 10000, false, wx, TXGBE_RDB_FDIR_CTL); + + if (ret < 0) + wx_dbg(wx, "Flow Director poll time exceeded!\n"); +} + +/** + * txgbe_init_fdir_signature - Initialize Flow Director sig filters + * @wx: pointer to hardware structure + **/ +static void txgbe_init_fdir_signature(struct wx *wx) +{ + u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K; + int index = VMDQ_P(0) / 4; + int offset = VMDQ_P(0) % 4; + u32 flex = 0; + + flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index)); + flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8)); + + flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | + TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8); + wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex); + + /* Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) | + TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) | + TXGBE_RDB_FDIR_CTL_FULL_THRESH(4); + + /* write hashes and fdirctrl register, poll for completion */ + txgbe_fdir_enable(wx, fdirctrl); +} + +/** + * txgbe_init_fdir_perfect - Initialize Flow Director perfect filters + * @wx: pointer to hardware structure + **/ +static void txgbe_init_fdir_perfect(struct wx *wx) +{ + u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K; + + /* Continue setup of fdirctrl register bits: + * Turn perfect match filtering on + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left + */ + fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH | + TXGBE_RDB_FDIR_CTL_DROP_Q(TXGBE_RDB_FDIR_DROP_QUEUE) | + TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) | + TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) | + TXGBE_RDB_FDIR_CTL_FULL_THRESH(4); + + /* write hashes and fdirctrl register, poll for completion */ + txgbe_fdir_enable(wx, fdirctrl); +} + +static void txgbe_fdir_filter_restore(struct wx *wx) +{ + struct txgbe_fdir_filter *filter; + struct txgbe *txgbe = wx->priv; + struct hlist_node *node; + u8 queue = 0; + int ret = 0; + + spin_lock(&txgbe->fdir_perfect_lock); + + if (!hlist_empty(&txgbe->fdir_filter_list)) + ret = txgbe_fdir_set_input_mask(wx, &txgbe->fdir_mask); + + if (ret) + goto unlock; + + hlist_for_each_entry_safe(filter, node, + &txgbe->fdir_filter_list, fdir_node) { + if (filter->action == TXGBE_RDB_FDIR_DROP_QUEUE) { + queue = TXGBE_RDB_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + + if (ring >= wx->num_rx_queues) { + wx_err(wx, "FDIR restore failed, ring:%u\n", + ring); + continue; + } + + /* Map the ring onto the absolute queue index */ + queue = wx->rx_ring[ring]->reg_idx; + } + + ret = txgbe_fdir_write_perfect_filter(wx, + &filter->filter, + filter->sw_idx, + queue); + if (ret) + wx_err(wx, "FDIR restore failed, index:%u\n", + filter->sw_idx); + } + +unlock: + spin_unlock(&txgbe->fdir_perfect_lock); +} + +void txgbe_configure_fdir(struct wx *wx) +{ + 
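/* Quiesce the Rx security path while the FDIR block is (re)programmed; + * it is re-enabled below once init and filter restore are complete. + */ + 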
wx_disable_sec_rx_path(wx); + + if (test_bit(WX_FLAG_FDIR_HASH, wx->flags)) { + txgbe_init_fdir_signature(wx); + } else if (test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) { + txgbe_init_fdir_perfect(wx); + txgbe_fdir_filter_restore(wx); + } + + wx_enable_sec_rx_path(wx); +} + +void txgbe_fdir_filter_exit(struct wx *wx) +{ + struct txgbe_fdir_filter *filter; + struct txgbe *txgbe = wx->priv; + struct hlist_node *node; + + spin_lock(&txgbe->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node, + &txgbe->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + txgbe->fdir_filter_count = 0; + + spin_unlock(&txgbe->fdir_perfect_lock); +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h new file mode 100644 index 000000000000..1f44ce60becb --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_FDIR_H_ +#define _TXGBE_FDIR_H_ + +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *input_mask); +void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype); +int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask); +int txgbe_fdir_write_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id, u8 queue); +int txgbe_fdir_erase_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id); +void txgbe_configure_fdir(struct wx *wx); +void txgbe_fdir_filter_exit(struct wx *wx); + +#endif /* _TXGBE_FDIR_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index d6b2b3c781b6..e551ae0e2069 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -99,12 +99,17 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum) } local_buffer = eeprom_ptrs; - for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) + for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) { + if (wx->mac.type == wx_mac_aml) { + if (i >= TXGBE_EEPROM_I2C_SRART_PTR && + i < TXGBE_EEPROM_I2C_END_PTR) + local_buffer[i] = 0xffff; + } if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) *checksum += local_buffer[i]; + } - if (eeprom_ptrs) - kvfree(eeprom_ptrs); + kvfree(eeprom_ptrs); *checksum = TXGBE_EEPROM_SUM - *checksum; @@ -183,7 +188,7 @@ int txgbe_reset_hw(struct wx *wx) if (status != 0) return status; - if (wx->media_type != sp_media_copper) { + if (wx->media_type != wx_media_copper) { u32 val; val = WX_MIS_RST_LAN_RST(wx->bus.func); @@ -198,6 +203,12 @@ int txgbe_reset_hw(struct wx *wx) txgbe_reset_misc(wx); + if (wx->mac.type != wx_mac_sp) { + wr32(wx, TXGBE_PX_PF_BME, 0x1); + wr32m(wx, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL, + TXGBE_RDM_RSC_CTL_FREE_CTL); + } + wx_clear_hw_cntrs(wx); /* Store the permanent mac address */ @@ -207,7 +218,7 @@ int txgbe_reset_hw(struct wx *wx) * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. 
*/ - wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; + wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES; wx_init_rx_addrs(wx); pci_set_master(wx->pdev); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c index b3e3605d1edb..20b9a28bcb55 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c @@ -6,10 +6,13 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" #include "../libwx/wx_hw.h" +#include "../libwx/wx_sriov.h" #include "txgbe_type.h" #include "txgbe_phy.h" #include "txgbe_irq.h" +#include "txgbe_aml.h" /** * txgbe_irq_enable - Enable default interrupt generation settings @@ -18,7 +21,14 @@ **/ void txgbe_irq_enable(struct wx *wx, bool queues) { - wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); + u32 misc_ien = TXGBE_PX_MISC_IEN_MASK; + + if (wx->mac.type == wx_mac_aml) { + misc_ien |= TXGBE_PX_MISC_GPIO; + txgbe_gpio_init_aml(wx); + } + + wr32(wx, WX_PX_MISC_IEN, misc_ien); /* unmask interrupt */ wx_intr_enable(wx, TXGBE_INTR_MISC); @@ -27,57 +37,19 @@ void txgbe_irq_enable(struct wx *wx, bool queues) } /** - * txgbe_intr - msi/legacy mode Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t txgbe_intr(int __always_unused irq, void *data) -{ - struct wx_q_vector *q_vector; - struct wx *wx = data; - struct pci_dev *pdev; - u32 eicr; - - q_vector = wx->q_vector[0]; - pdev = wx->pdev; - - eicr = wx_misc_isb(wx, WX_ISB_VEC0); - if (!eicr) { - /* shared interrupt alert! - * the interrupt that we masked before the ICR read. - */ - if (netif_running(wx->netdev)) - txgbe_irq_enable(wx, true); - return IRQ_NONE; /* Not our interrupt */ - } - wx->isb_mem[WX_ISB_VEC0] = 0; - if (!(pdev->msi_enabled)) - wr32(wx, WX_PX_INTA, 1); - - wx->isb_mem[WX_ISB_MISC] = 0; - /* would disable interrupts here but it is auto disabled */ - napi_schedule_irqoff(&q_vector->napi); - - /* re-enable link(maybe) and non-queue interrupts, no flush. - * txgbe_poll will re-enable the queue interrupts - */ - if (netif_running(wx->netdev)) - txgbe_irq_enable(wx, false); - - return IRQ_HANDLED; -} - -/** - * txgbe_request_msix_irqs - Initialize MSI-X interrupts + * txgbe_request_queue_irqs - Initialize MSI-X queue interrupts * @wx: board private structure * - * Allocate MSI-X vectors and request interrupts from the kernel. + * Allocate MSI-X queue vectors and request interrupts from the kernel. **/ -static int txgbe_request_msix_irqs(struct wx *wx) +int txgbe_request_queue_irqs(struct wx *wx) { struct net_device *netdev = wx->netdev; int vector, err; + if (!wx->pdev->msix_enabled) + return 0; + for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; struct msix_entry *entry = &wx->msix_q_entries[vector]; @@ -110,50 +82,22 @@ free_queue_irqs: return err; } -/** - * txgbe_request_irq - initialize interrupts - * @wx: board private structure - * - * Attempt to configure interrupts using the best available - * capabilities of the hardware and kernel. 
- **/ -int txgbe_request_irq(struct wx *wx) +static int txgbe_request_link_irq(struct txgbe *txgbe) { - struct net_device *netdev = wx->netdev; - struct pci_dev *pdev = wx->pdev; - int err; - - if (pdev->msix_enabled) - err = txgbe_request_msix_irqs(wx); - else if (pdev->msi_enabled) - err = request_irq(wx->pdev->irq, &txgbe_intr, 0, - netdev->name, wx); - else - err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED, - netdev->name, wx); - - if (err) - wx_err(wx, "request_irq failed, Error %d\n", err); - - return err; + txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK); + return request_threaded_irq(txgbe->link_irq, NULL, + txgbe_link_irq_handler, + IRQF_ONESHOT, "txgbe-link-irq", txgbe); } static int txgbe_request_gpio_irq(struct txgbe *txgbe) { txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO); return request_threaded_irq(txgbe->gpio_irq, NULL, - txgbe_gpio_irq_handler, + txgbe_gpio_irq_handler_aml, IRQF_ONESHOT, "txgbe-gpio-irq", txgbe); } -static int txgbe_request_link_irq(struct txgbe *txgbe) -{ - txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK); - return request_threaded_irq(txgbe->link_irq, NULL, - txgbe_link_irq_handler, - IRQF_ONESHOT, "txgbe-link-irq", txgbe); -} - static const struct irq_chip txgbe_irq_chip = { .name = "txgbe-misc-irq", }; @@ -178,22 +122,65 @@ static const struct irq_domain_ops txgbe_misc_irq_domain_ops = { static irqreturn_t txgbe_misc_irq_handle(int irq, void *data) { + struct wx_q_vector *q_vector; + struct txgbe *txgbe = data; + struct wx *wx = txgbe->wx; + u32 eicr; + + if (wx->pdev->msix_enabled) { + eicr = wx_misc_isb(wx, WX_ISB_MISC); + txgbe->eicr = eicr; + if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) { + wx_msg_task(txgbe->wx); + wx_intr_enable(wx, TXGBE_INTR_MISC); + } + return IRQ_WAKE_THREAD; + } + + eicr = wx_misc_isb(wx, WX_ISB_VEC0); + if (!eicr) { + /* shared interrupt alert! + * re-enable the interrupt that we masked before the ICR read. 
+ */ + if (netif_running(wx->netdev)) + txgbe_irq_enable(wx, true); + return IRQ_NONE; /* Not our interrupt */ + } + wx->isb_mem[WX_ISB_VEC0] = 0; + if (!(wx->pdev->msi_enabled)) + wr32(wx, WX_PX_INTA, 1); + + /* would disable interrupts here but it is auto disabled */ + q_vector = wx->q_vector[0]; + napi_schedule_irqoff(&q_vector->napi); + + txgbe->eicr = wx_misc_isb(wx, WX_ISB_MISC); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data) +{ struct txgbe *txgbe = data; struct wx *wx = txgbe->wx; unsigned int nhandled = 0; unsigned int sub_irq; u32 eicr; - eicr = wx_misc_isb(wx, WX_ISB_MISC); + eicr = txgbe->eicr; + if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN | + TXGBE_PX_MISC_ETH_AN)) { + sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK); + handle_nested_irq(sub_irq); + nhandled++; + } if (eicr & TXGBE_PX_MISC_GPIO) { sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO); handle_nested_irq(sub_irq); nhandled++; } - if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN | - TXGBE_PX_MISC_ETH_AN)) { - sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK); - handle_nested_irq(sub_irq); + if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC)) { + wx_ptp_check_pps_event(wx); nhandled++; } @@ -215,7 +202,12 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe) void txgbe_free_misc_irq(struct txgbe *txgbe) { - free_irq(txgbe->gpio_irq, txgbe); + if (txgbe->wx->mac.type == wx_mac_aml40) + return; + + if (txgbe->wx->mac.type == wx_mac_aml) + free_irq(txgbe->gpio_irq, txgbe); + free_irq(txgbe->link_irq, txgbe); free_irq(txgbe->misc.irq, txgbe); txgbe_del_irq_domain(txgbe); @@ -223,12 +215,16 @@ void txgbe_free_misc_irq(struct txgbe *txgbe) int txgbe_setup_misc_irq(struct txgbe *txgbe) { + unsigned long flags = IRQF_ONESHOT; struct wx *wx = txgbe->wx; int hwirq, err; - txgbe->misc.nirqs = 2; - txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0, - &txgbe_misc_irq_domain_ops, txgbe); + if (wx->mac.type == wx_mac_aml40) + goto skip_sp_irq; + + txgbe->misc.nirqs = TXGBE_IRQ_MAX; + txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0, + &txgbe_misc_irq_domain_ops, txgbe); if (!txgbe->misc.domain) return -ENOMEM; @@ -236,30 +232,39 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe) irq_create_mapping(txgbe->misc.domain, hwirq); txgbe->misc.chip = txgbe_irq_chip; - if (wx->pdev->msix_enabled) + if (wx->pdev->msix_enabled) { txgbe->misc.irq = wx->msix_entry->vector; - else + } else { txgbe->misc.irq = wx->pdev->irq; + if (!wx->pdev->msi_enabled) + flags |= IRQF_SHARED; + } - err = request_threaded_irq(txgbe->misc.irq, NULL, - txgbe_misc_irq_handle, - IRQF_ONESHOT, + err = request_threaded_irq(txgbe->misc.irq, txgbe_misc_irq_handle, + txgbe_misc_irq_thread_fn, + flags, wx->netdev->name, txgbe); if (err) goto del_misc_irq; - err = txgbe_request_gpio_irq(txgbe); + err = txgbe_request_link_irq(txgbe); if (err) goto free_msic_irq; - err = txgbe_request_link_irq(txgbe); + if (wx->mac.type == wx_mac_sp) + goto skip_sp_irq; + + err = txgbe_request_gpio_irq(txgbe); if (err) - goto free_gpio_irq; + goto free_link_irq; + +skip_sp_irq: + wx->misc_irq_domain = true; return 0; -free_gpio_irq: - free_irq(txgbe->gpio_irq, txgbe); +free_link_irq: + free_irq(txgbe->link_irq, txgbe); free_msic_irq: free_irq(txgbe->misc.irq, txgbe); del_misc_irq: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h index b77945e7a0f2..e6285b94625e 100644 --- 
a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h @@ -2,6 +2,6 @@ /* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */ void txgbe_irq_enable(struct wx *wx, bool queues); -int txgbe_request_irq(struct wx *wx); +int txgbe_request_queue_irqs(struct wx *wx); void txgbe_free_misc_irq(struct txgbe *txgbe); int txgbe_setup_misc_irq(struct txgbe *txgbe); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index bd4624d14ca0..f3d2778b8e35 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -8,16 +8,22 @@ #include <linux/string.h> #include <linux/etherdevice.h> #include <linux/phylink.h> +#include <net/udp_tunnel.h> #include <net/ip.h> #include <linux/if_vlan.h> #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" #include "../libwx/wx_hw.h" +#include "../libwx/wx_mbx.h" +#include "../libwx/wx_sriov.h" #include "txgbe_type.h" #include "txgbe_hw.h" #include "txgbe_phy.h" +#include "txgbe_aml.h" #include "txgbe_irq.h" +#include "txgbe_fdir.h" #include "txgbe_ethtool.h" char txgbe_driver_name[] = "txgbe"; @@ -33,6 +39,12 @@ char txgbe_driver_name[] = "txgbe"; static const struct pci_device_id txgbe_pci_tbl[] = { { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0}, { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5010), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5110), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5025), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5125), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5040), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5140), 0}, /* required last entry */ { .device = 0 } }; @@ -77,11 +89,63 @@ static int txgbe_enumerate_functions(struct wx *wx) return physfns; } +static void txgbe_sfp_detection_subtask(struct wx *wx) +{ + int err; + + if (!test_bit(WX_FLAG_NEED_SFP_RESET, wx->flags)) + return; + + /* wait for SFP module ready */ + msleep(200); + + err = txgbe_identify_sfp(wx); + if (err) + return; + + clear_bit(WX_FLAG_NEED_SFP_RESET, wx->flags); +} + +static void txgbe_link_config_subtask(struct wx *wx) +{ + int err; + + if (!test_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags)) + return; + + err = txgbe_set_phy_link(wx); + if (err) + return; + + clear_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); +} + +/** + * txgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void txgbe_service_task(struct work_struct *work) +{ + struct wx *wx = container_of(work, struct wx, service_task); + + txgbe_sfp_detection_subtask(wx); + txgbe_link_config_subtask(wx); + + wx_service_event_complete(wx); +} + +static void txgbe_init_service(struct wx *wx) +{ + timer_setup(&wx->service_timer, wx_service_timer, 0); + INIT_WORK(&wx->service_task, txgbe_service_task); + clear_bit(WX_STATE_SERVICE_SCHED, wx->state); +} + static void txgbe_up_complete(struct wx *wx) { struct net_device *netdev = wx->netdev; + u32 reg; - txgbe_reinit_gpio_intr(wx); wx_control_hw(wx, true); wx_configure_vectors(wx); @@ -89,7 +153,27 @@ static void txgbe_up_complete(struct wx *wx) smp_mb__before_atomic(); wx_napi_enable_all(wx); - phylink_start(wx->phylink); + switch (wx->mac.type) { + case wx_mac_aml40: + reg = rd32(wx, TXGBE_AML_MAC_TX_CFG); + reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK; + reg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G; + wr32(wx, WX_MAC_TX_CFG, reg); + txgbe_enable_sec_tx_path(wx); + 
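/* aml40 links are not managed by phylink (see txgbe_down()), so + * report the carrier state directly once the MAC is configured. + */ + 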
netif_carrier_on(wx->netdev); + break; + case wx_mac_aml: + /* Enable TX laser */ + wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, 0); + txgbe_setup_link(wx); + phylink_start(wx->phylink); + break; + case wx_mac_sp: + phylink_start(wx->phylink); + break; + default: + break; + } /* clear any pending interrupts, may auto mask */ rd32(wx, WX_PX_IC(0)); @@ -99,6 +183,13 @@ /* enable transmits */ netif_tx_start_all_queues(netdev); + mod_timer(&wx->service_timer, jiffies); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD, + WX_CFG_PORT_CTL_PFRSTD); + /* update rx/tx settings for all active vfs */ + wx_set_all_vfs(wx); } static void txgbe_reset(struct wx *wx) @@ -116,6 +207,9 @@ memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len); wx_flush_sw_mac_table(wx); wx_mac_set_default_filter(wx, old_addr); + + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset(wx); } static void txgbe_disable_device(struct wx *wx) @@ -138,12 +232,24 @@ wx_irq_disable(wx); wx_napi_disable_all(wx); + timer_delete_sync(&wx->service_timer); + if (wx->bus.func < 2) wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0); else wx_err(wx, "%s: invalid bus lan id %d\n", __func__, wx->bus.func); + if (wx->num_vfs) { + /* Clear EITR Select mapping */ + wr32(wx, WX_PX_ITRSEL, 0); + /* Mark all the VFs as inactive */ + for (i = 0; i < wx->num_vfs; i++) + wx->vfinfo[i].clear_to_send = 0; + /* update rx/tx settings for all active vfs */ + wx_set_all_vfs(wx); + } + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { /* disable mac transmitter */ @@ -167,7 +273,22 @@ void txgbe_down(struct wx *wx) { txgbe_disable_device(wx); txgbe_reset(wx); - phylink_stop(wx->phylink); + + switch (wx->mac.type) { + case wx_mac_aml40: + netif_carrier_off(wx->netdev); + break; + case wx_mac_aml: + phylink_stop(wx->phylink); + /* Disable TX laser */ + wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, TXGBE_GPIOBIT_1); + break; + case wx_mac_sp: + phylink_stop(wx->phylink); + break; + default: + break; + } wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); @@ -176,6 +297,7 @@ void txgbe_up(struct wx *wx) { wx_configure(wx); + wx_ptp_init(wx); txgbe_up_complete(wx); } @@ -192,6 +314,16 @@ static void txgbe_init_type_code(struct wx *wx) case TXGBE_DEV_ID_WX1820: wx->mac.type = wx_mac_sp; break; + case TXGBE_DEV_ID_AML5010: + case TXGBE_DEV_ID_AML5110: + case TXGBE_DEV_ID_AML5025: + case TXGBE_DEV_ID_AML5125: + wx->mac.type = wx_mac_aml; + break; + case TXGBE_DEV_ID_AML5040: + case TXGBE_DEV_ID_AML5140: + wx->mac.type = wx_mac_aml40; + break; default: wx->mac.type = wx_mac_unknown; break; @@ -199,25 +331,25 @@ switch (device_type) { case TXGBE_ID_SFP: - wx->media_type = sp_media_fiber; + wx->media_type = wx_media_fiber; break; case TXGBE_ID_XAUI: case TXGBE_ID_SGMII: - wx->media_type = sp_media_copper; + wx->media_type = wx_media_copper; break; case TXGBE_ID_KR_KX_KX4: case TXGBE_ID_MAC_XAUI: case TXGBE_ID_MAC_SGMII: - wx->media_type = sp_media_backplane; + wx->media_type = wx_media_backplane; break; case TXGBE_ID_SFI_XAUI: if (wx->bus.func == 0) - wx->media_type = sp_media_fiber; + wx->media_type = wx_media_fiber; else - wx->media_type = sp_media_copper; + wx->media_type = wx_media_copper; break; 
default: - wx->media_type = sp_media_unknown; + wx->media_type = wx_media_unknown; break; } } @@ -231,13 +363,13 @@ static int txgbe_sw_init(struct wx *wx) u16 msix_count = 0; int err; - wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; - wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; - wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; - wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; - wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE; - wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE; - wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ; + wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES; + wx->mac.max_tx_queues = TXGBE_MAX_TXQ; + wx->mac.max_rx_queues = TXGBE_MAX_RXQ; + wx->mac.mcft_size = TXGBE_MC_TBL_SIZE; + wx->mac.vft_size = TXGBE_VFT_TBL_SIZE; + wx->mac.rx_pb_size = TXGBE_RX_PB_SIZE; + wx->mac.tx_pb_size = TXGBE_TDB_PB_SZ; /* PCI config space info */ err = wx_sw_init(wx); @@ -257,6 +389,17 @@ static int txgbe_sw_init(struct wx *wx) num_online_cpus()); wx->rss_enabled = true; + wx->ring_feature[RING_F_FDIR].limit = min_t(int, TXGBE_MAX_FDIR_INDICES, + num_online_cpus()); + set_bit(WX_FLAG_FDIR_CAPABLE, wx->flags); + set_bit(WX_FLAG_FDIR_HASH, wx->flags); + wx->atr_sample_rate = TXGBE_DEFAULT_ATR_SAMPLE_RATE; + wx->atr = txgbe_atr; + wx->configure_fdir = txgbe_configure_fdir; + + set_bit(WX_FLAG_RSC_CAPABLE, wx->flags); + set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags); + /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; wx->tx_itr_setting = 1; @@ -264,14 +407,37 @@ static int txgbe_sw_init(struct wx *wx) /* set default ring sizes */ wx->tx_ring_count = TXGBE_DEFAULT_TXD; wx->rx_ring_count = TXGBE_DEFAULT_RXD; + wx->mbx.size = WX_VXMAILBOX_SIZE; /* set default work limits */ wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK; wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + wx->setup_tc = txgbe_setup_tc; + wx->do_reset = txgbe_do_reset; + set_bit(0, &wx->fwd_bitmask); + + switch (wx->mac.type) { + case wx_mac_sp: + break; + case wx_mac_aml: + case wx_mac_aml40: + set_bit(WX_FLAG_SWFW_RING, wx->flags); + wx->swfw_index = 0; + break; + default: + break; + } + return 0; } +static void txgbe_init_fdir(struct txgbe *txgbe) +{ + txgbe->fdir_filter_count = 0; + spin_lock_init(&txgbe->fdir_perfect_lock); +} + /** * txgbe_open - Called when a network interface is made active * @netdev: network interface device structure @@ -292,9 +458,9 @@ static int txgbe_open(struct net_device *netdev) wx_configure(wx); - err = txgbe_request_irq(wx); + err = txgbe_request_queue_irqs(wx); if (err) - goto err_free_isb; + goto err_free_resources; /* Notify the stack of the actual queue counts. 
*/ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); @@ -305,14 +471,16 @@ if (err) goto err_free_irq; + wx_ptp_init(wx); + txgbe_up_complete(wx); return 0; err_free_irq: wx_free_irq(wx); -err_free_isb: - wx_free_isb_resources(wx); +err_free_resources: + wx_free_resources(wx); err_reset: txgbe_reset(wx); @@ -328,6 +496,7 @@ err_reset: */ static void txgbe_close_suspend(struct wx *wx) { + wx_ptp_suspend(wx); txgbe_disable_device(wx); wx_free_resources(wx); } @@ -347,9 +516,11 @@ static int txgbe_close(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); + wx_ptp_stop(wx); txgbe_down(wx); wx_free_irq(wx); wx_free_resources(wx); + txgbe_fdir_filter_exit(wx); wx_control_hw(wx, false); return 0; @@ -421,6 +592,67 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc) return 0; } +static void txgbe_reinit_locked(struct wx *wx) +{ + int err = 0; + + netif_trans_update(wx->netdev); + + err = wx_set_state_reset(wx); + if (err) { + wx_err(wx, "wait device reset timeout\n"); + return; + } + + txgbe_down(wx); + txgbe_up(wx); + + clear_bit(WX_STATE_RESETTING, wx->state); +} + +void txgbe_do_reset(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + if (netif_running(netdev)) + txgbe_reinit_locked(wx); + else + txgbe_reset(wx); +} + +static int txgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table) +{ + struct wx *wx = netdev_priv(dev); + struct udp_tunnel_info ti; + + udp_tunnel_nic_get_port(dev, table, 0, &ti); + switch (ti.type) { + case UDP_TUNNEL_TYPE_VXLAN: + wr32(wx, TXGBE_CFG_VXLAN, ntohs(ti.port)); + break; + case UDP_TUNNEL_TYPE_VXLAN_GPE: + wr32(wx, TXGBE_CFG_VXLAN_GPE, ntohs(ti.port)); + break; + case UDP_TUNNEL_TYPE_GENEVE: + wr32(wx, TXGBE_CFG_GENEVE, ntohs(ti.port)); + break; + default: + break; + } + + return 0; +} + +static const struct udp_tunnel_nic_info txgbe_udp_tunnels = { + .sync_table = txgbe_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; + static const struct net_device_ops txgbe_netdev_ops = { .ndo_open = txgbe_open, .ndo_stop = txgbe_close, @@ -428,11 +660,15 @@ .ndo_start_xmit = wx_xmit_frame, .ndo_set_rx_mode = wx_set_rx_mode, .ndo_set_features = wx_set_features, + .ndo_fix_features = wx_fix_features, + .ndo_features_check = wx_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = wx_set_mac, .ndo_get_stats64 = wx_get_stats64, .ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid, + .ndo_hwtstamp_set = wx_hwtstamp_set, + .ndo_hwtstamp_get = wx_hwtstamp_get, }; /** @@ -506,14 +742,19 @@ static int txgbe_probe(struct pci_dev *pdev, goto err_pci_release_regions; } + /* The sapphire supports up to 63 VFs per PF, but the physical + * function also needs one pool for basic networking. 
+ */ + pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT); wx->driver_name = txgbe_driver_name; txgbe_set_ethtool_ops(netdev); netdev->netdev_ops = &txgbe_netdev_ops; + netdev->udp_tunnel_nic_info = &txgbe_udp_tunnels; /* setup the private structure */ err = txgbe_sw_init(wx); if (err) - goto err_free_mac_table; + goto err_pci_release_regions; /* check if flash load is done after hw power up */ err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST); @@ -554,6 +795,7 @@ netdev->features |= NETIF_F_HIGHDMA; netdev->hw_features |= NETIF_F_GRO; netdev->features |= NETIF_F_GRO; + netdev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; @@ -575,9 +817,11 @@ eth_hw_addr_set(netdev, wx->mac.perm_addr); wx_mac_set_default_filter(wx, wx->mac.perm_addr); + txgbe_init_service(wx); + err = wx_init_interrupt_scheme(wx); if (err) - goto err_free_mac_table; + goto err_cancel_service; /* Save off EEPROM version number and Option Rom version which * together make a unique identity for the eeprom @@ -620,6 +864,13 @@ if (etrack_id < 0x20010) dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n"); + err = txgbe_test_hostif(wx); + if (err != 0) { + dev_err(&pdev->dev, "Mismatched Firmware version\n"); + err = -EIO; + goto err_release_hw; + } + txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL); if (!txgbe) { err = -ENOMEM; @@ -629,6 +880,8 @@ static int txgbe_probe(struct pci_dev *pdev, txgbe->wx = wx; wx->priv = txgbe; + txgbe_init_fdir(txgbe); + err = txgbe_setup_misc_irq(txgbe); if (err) goto err_release_hw; @@ -668,7 +921,11 @@ err_free_misc_irq: err_release_hw: wx_clear_interrupt_scheme(wx); wx_control_hw(wx, false); +err_cancel_service: + timer_delete_sync(&wx->service_timer); + cancel_work_sync(&wx->service_task); err_free_mac_table: + kfree(wx->rss_key); kfree(wx->mac_table); err_pci_release_regions: pci_release_selected_regions(pdev, @@ -693,11 +950,15 @@ static void txgbe_remove(struct pci_dev *pdev) struct txgbe *txgbe = wx->priv; struct net_device *netdev; + cancel_work_sync(&wx->service_task); + netdev = wx->netdev; + wx_disable_sriov(wx); unregister_netdev(netdev); txgbe_remove_phy(txgbe); txgbe_free_misc_irq(txgbe); + wx_free_isb_resources(wx); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); @@ -715,11 +976,12 @@ static struct pci_driver txgbe_driver = { .probe = txgbe_probe, .remove = txgbe_remove, .shutdown = txgbe_shutdown, + .sriov_configure = wx_pci_sriov_configure, }; module_pci_driver(txgbe_driver); MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl); MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>"); -MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver"); +MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 93295916b1d2..03f1b9bc604d 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -15,8 +15,12 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" +#include "../libwx/wx_sriov.h" +#include "../libwx/wx_mbx.h" #include "../libwx/wx_hw.h" #include "txgbe_type.h" +#include "txgbe_aml.h" #include "txgbe_phy.h" #include "txgbe_hw.h" 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 93295916b1d2..03f1b9bc604d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -15,8 +15,12 @@
 #include "../libwx/wx_type.h"
 #include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_sriov.h"
+#include "../libwx/wx_mbx.h"
 #include "../libwx/wx_hw.h"
 #include "txgbe_type.h"
+#include "txgbe_aml.h"
 #include "txgbe_phy.h"
 #include "txgbe_hw.h"
 
@@ -122,7 +126,7 @@ static int txgbe_pcs_write(struct mii_bus *bus, int addr, int devnum, int regnum
 static int txgbe_mdio_pcs_init(struct txgbe *txgbe)
 {
 	struct mii_bus *mii_bus;
-	struct dw_xpcs *xpcs;
+	struct phylink_pcs *pcs;
 	struct pci_dev *pdev;
 	struct wx *wx;
 	int ret = 0;
@@ -147,11 +151,11 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe)
 	if (ret)
 		return ret;
 
-	xpcs = xpcs_create_mdiodev(mii_bus, 0, PHY_INTERFACE_MODE_10GBASER);
-	if (IS_ERR(xpcs))
-		return PTR_ERR(xpcs);
+	pcs = xpcs_create_pcs_mdiodev(mii_bus, 0);
+	if (IS_ERR(pcs))
+		return PTR_ERR(pcs);
 
-	txgbe->xpcs = xpcs;
+	txgbe->pcs = pcs;
 
 	return 0;
 }
@@ -162,8 +166,8 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi
 	struct wx *wx = phylink_to_wx(config);
 	struct txgbe *txgbe = wx->priv;
 
-	if (interface == PHY_INTERFACE_MODE_10GBASER)
-		return &txgbe->xpcs->pcs;
+	if (wx->media_type != wx_media_copper)
+		return txgbe->pcs;
 
 	return NULL;
 }
@@ -179,6 +183,12 @@ static void txgbe_mac_link_down(struct phylink_config *config,
 	struct wx *wx = phylink_to_wx(config);
 
 	wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+
+	wx->speed = SPEED_UNKNOWN;
+	if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+		wx_ptp_reset_cyclecounter(wx);
+	/* ping all the active vfs to let them know we are going down */
+	wx_ping_all_vfs_with_link_status(wx, false);
 }
 
 static void txgbe_mac_link_up(struct phylink_config *config,
@@ -215,6 +225,13 @@ static void txgbe_mac_link_up(struct phylink_config *config,
 	wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
 	wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
 	wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+
+	wx->speed = speed;
+	wx->last_rx_ptp_check = jiffies;
+	if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+		wx_ptp_reset_cyclecounter(wx);
+	/* ping all the active vfs to let them know we are going up */
+	wx_ping_all_vfs_with_link_status(wx, true);
 }
 
 static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
@@ -262,7 +279,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
 	config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD |
 				   MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
 
-	if (wx->media_type == sp_media_copper) {
+	if (wx->media_type == wx_media_copper) {
 		phy_mode = PHY_INTERFACE_MODE_XAUI;
 		__set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces);
 	} else {
@@ -302,7 +319,10 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data)
 	status = rd32(wx, TXGBE_CFG_PORT_ST);
 	up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
 
-	phylink_mac_change(wx->phylink, up);
+	if (txgbe->pcs)
+		phylink_pcs_change(txgbe->pcs, up);
+	else
+		phylink_mac_change(wx->phylink, up);
 
 	return IRQ_HANDLED;
 }
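With the xpcs now handed around as a generic struct phylink_pcs, the link interrupt above reports events against the PCS via phylink_pcs_change(), which prompts phylink to re-read the PCS state; phylink_mac_change() remains the path when no PCS is attached. The same pattern in isolation, as a hypothetical handler (the example_* names and priv layout are illustrative, not driver code):

/* Hypothetical link ISR: notify phylink against the PCS when one is
 * attached, otherwise against the MAC.
 */
static irqreturn_t example_link_irq(int irq, void *data)
{
	struct example_priv *priv = data;		/* illustrative */
	bool up = example_read_link_status(priv);	/* illustrative */

	if (priv->pcs)
		phylink_pcs_change(priv->pcs, up);
	else
		phylink_mac_change(priv->phylink, up);

	return IRQ_HANDLED;
}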
@@ -358,169 +378,8 @@ static int txgbe_gpio_direction_out(struct gpio_chip *chip, unsigned int offset,
 	return 0;
 }
 
-static void txgbe_gpio_irq_ack(struct irq_data *d)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	irq_hw_number_t hwirq = irqd_to_hwirq(d);
-	struct wx *wx = gpiochip_get_data(gc);
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&wx->gpio_lock, flags);
-	wr32(wx, WX_GPIO_EOI, BIT(hwirq));
-	raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-}
-
-static void txgbe_gpio_irq_mask(struct irq_data *d)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	irq_hw_number_t hwirq = irqd_to_hwirq(d);
-	struct wx *wx = gpiochip_get_data(gc);
-	unsigned long flags;
-
-	gpiochip_disable_irq(gc, hwirq);
-
-	raw_spin_lock_irqsave(&wx->gpio_lock, flags);
-	wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), BIT(hwirq));
-	raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-}
-
-static void txgbe_gpio_irq_unmask(struct irq_data *d)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	irq_hw_number_t hwirq = irqd_to_hwirq(d);
-	struct wx *wx = gpiochip_get_data(gc);
-	unsigned long flags;
-
-	gpiochip_enable_irq(gc, hwirq);
-
-	raw_spin_lock_irqsave(&wx->gpio_lock, flags);
-	wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), 0);
-	raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-}
-
-static void txgbe_toggle_trigger(struct gpio_chip *gc, unsigned int offset)
-{
-	struct wx *wx = gpiochip_get_data(gc);
-	u32 pol, val;
-
-	pol = rd32(wx, WX_GPIO_POLARITY);
-	val = rd32(wx, WX_GPIO_EXT);
-
-	if (val & BIT(offset))
-		pol &= ~BIT(offset);
-	else
-		pol |= BIT(offset);
-
-	wr32(wx, WX_GPIO_POLARITY, pol);
-}
-
-static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	irq_hw_number_t hwirq = irqd_to_hwirq(d);
-	struct wx *wx = gpiochip_get_data(gc);
-	u32 level, polarity, mask;
-	unsigned long flags;
-
-	mask = BIT(hwirq);
-
-	if (type & IRQ_TYPE_LEVEL_MASK) {
-		level = 0;
-		irq_set_handler_locked(d, handle_level_irq);
-	} else {
-		level = mask;
-		irq_set_handler_locked(d, handle_edge_irq);
-	}
-
-	if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
-		polarity = mask;
-	else
-		polarity = 0;
-
-	raw_spin_lock_irqsave(&wx->gpio_lock, flags);
-
-	wr32m(wx, WX_GPIO_INTEN, mask, mask);
-	wr32m(wx, WX_GPIO_INTTYPE_LEVEL, mask, level);
-	if (type == IRQ_TYPE_EDGE_BOTH)
-		txgbe_toggle_trigger(gc, hwirq);
-	else
-		wr32m(wx, WX_GPIO_POLARITY, mask, polarity);
-
-	raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-
-	return 0;
-}
-
-static const struct irq_chip txgbe_gpio_irq_chip = {
-	.name = "txgbe-gpio-irq",
-	.irq_ack = txgbe_gpio_irq_ack,
-	.irq_mask = txgbe_gpio_irq_mask,
-	.irq_unmask = txgbe_gpio_irq_unmask,
-	.irq_set_type = txgbe_gpio_set_type,
-	.flags = IRQCHIP_IMMUTABLE,
-	GPIOCHIP_IRQ_RESOURCE_HELPERS,
-};
-
-irqreturn_t txgbe_gpio_irq_handler(int irq, void *data)
-{
-	struct txgbe *txgbe = data;
-	struct wx *wx = txgbe->wx;
-	irq_hw_number_t hwirq;
-	unsigned long gpioirq;
-	struct gpio_chip *gc;
-	unsigned long flags;
-
-	gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
-
-	gc = txgbe->gpio;
-	for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
-		int gpio = irq_find_mapping(gc->irq.domain, hwirq);
-		struct irq_data *d = irq_get_irq_data(gpio);
-		u32 irq_type = irq_get_trigger_type(gpio);
-
-		txgbe_gpio_irq_ack(d);
-		handle_nested_irq(gpio);
-
-		if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
-			raw_spin_lock_irqsave(&wx->gpio_lock, flags);
-			txgbe_toggle_trigger(gc, hwirq);
-			raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
-void txgbe_reinit_gpio_intr(struct wx *wx)
-{
-	struct txgbe *txgbe = wx->priv;
-	irq_hw_number_t hwirq;
-	unsigned long gpioirq;
-	struct gpio_chip *gc;
-	unsigned long flags;
-
-	/* for gpio interrupt pending before irq enable */
-	gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
-
-	gc = txgbe->gpio;
-	for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
-		int gpio = irq_find_mapping(gc->irq.domain, hwirq);
-		struct irq_data *d = irq_get_irq_data(gpio);
-		u32 irq_type = irq_get_trigger_type(gpio);
-
-		txgbe_gpio_irq_ack(d);
-
-		if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
-			raw_spin_lock_irqsave(&wx->gpio_lock, flags);
-			txgbe_toggle_trigger(gc, hwirq);
-			raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-		}
-	}
-}
-
 static int txgbe_gpio_init(struct txgbe *txgbe)
 {
-	struct gpio_irq_chip *girq;
 	struct gpio_chip *gc;
 	struct device *dev;
 	struct wx *wx;
@@ -550,11 +409,6 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
 	gc->direction_input = txgbe_gpio_direction_in;
 	gc->direction_output = txgbe_gpio_direction_out;
 
-	girq = &gc->irq;
-	gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip);
-	girq->default_type = IRQ_TYPE_NONE;
-	girq->handler = handle_bad_irq;
-
 	ret = devm_gpiochip_add_data(dev, gc, wx);
 	if (ret)
 		return ret;
@@ -578,7 +432,7 @@ static int txgbe_clock_register(struct txgbe *txgbe)
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
-	clock = clkdev_create(clk, NULL, clk_name);
+	clock = clkdev_create(clk, NULL, "%s", clk_name);
 	if (!clock) {
 		clk_unregister(clk);
 		return -ENOMEM;
@@ -688,8 +542,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe)
 	mii_bus->parent = &pdev->dev;
 	mii_bus->phy_mask = GENMASK(31, 1);
 	mii_bus->priv = wx;
-	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x",
-		 (pdev->bus->number << 8) | pdev->devfn);
+	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x", pci_dev_id(pdev));
 
 	ret = devm_mdiobus_register(&pdev->dev, mii_bus);
 	if (ret) {
@@ -724,8 +577,18 @@ int txgbe_init_phy(struct txgbe *txgbe)
 	struct wx *wx = txgbe->wx;
 	int ret;
 
-	if (txgbe->wx->media_type == sp_media_copper)
-		return txgbe_ext_phy_init(txgbe);
+	switch (wx->mac.type) {
+	case wx_mac_aml40:
+		return 0;
+	case wx_mac_aml:
+		return txgbe_phylink_init_aml(txgbe);
+	case wx_mac_sp:
+		if (wx->media_type == wx_media_copper)
+			return txgbe_ext_phy_init(txgbe);
+		break;
+	default:
+		break;
+	}
 
 	ret = txgbe_swnodes_register(txgbe);
 	if (ret) {
@@ -779,7 +642,7 @@ err_unregister_clk:
 err_destroy_phylink:
 	phylink_destroy(wx->phylink);
 err_destroy_xpcs:
-	xpcs_destroy(txgbe->xpcs);
+	xpcs_destroy_pcs(txgbe->pcs);
 err_unregister_swnode:
 	software_node_unregister_node_group(txgbe->nodes.group);
 
@@ -788,10 +651,21 @@ err_unregister_swnode:
 
 void txgbe_remove_phy(struct txgbe *txgbe)
 {
-	if (txgbe->wx->media_type == sp_media_copper) {
-		phylink_disconnect_phy(txgbe->wx->phylink);
+	switch (txgbe->wx->mac.type) {
+	case wx_mac_aml40:
+		return;
+	case wx_mac_aml:
 		phylink_destroy(txgbe->wx->phylink);
 		return;
+	case wx_mac_sp:
+		if (txgbe->wx->media_type == wx_media_copper) {
+			phylink_disconnect_phy(txgbe->wx->phylink);
+			phylink_destroy(txgbe->wx->phylink);
+			return;
+		}
+		break;
+	default:
+		break;
 	}
 
 	platform_device_unregister(txgbe->sfp_dev);
@@ -799,6 +673,6 @@ void txgbe_remove_phy(struct txgbe *txgbe)
 	clkdev_drop(txgbe->clock);
 	clk_unregister(txgbe->clk);
 	phylink_destroy(txgbe->wx->phylink);
-	xpcs_destroy(txgbe->xpcs);
+	xpcs_destroy_pcs(txgbe->pcs);
 	software_node_unregister_node_group(txgbe->nodes.group);
 }
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 8a026d804fe2..a32b19d71ea2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -4,10 +4,8 @@
 #ifndef _TXGBE_PHY_H_
 #define _TXGBE_PHY_H_
 
-irqreturn_t txgbe_gpio_irq_handler(int irq, void *data);
-void txgbe_reinit_gpio_intr(struct wx *wx);
 irqreturn_t txgbe_link_irq_handler(int irq, void *data);
 int txgbe_init_phy(struct txgbe *txgbe);
 void txgbe_remove_phy(struct txgbe *txgbe);
 
-#endif /* _TXGBE_NODE_H_ */
+#endif /* _TXGBE_PHY_H_ */
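Two small hardening fixes ride along in txgbe_phy.c above: clkdev_create() takes a printf-style format for the device id, so the clock name is now routed through "%s" instead of being used as the format itself, and pci_dev_id() replaces the open-coded (bus->number << 8) | devfn arithmetic. A hypothetical illustration of the format-string point (the clock name shown is illustrative, not the driver's exact string):

/* Hypothetical illustration: a clock name that could ever contain '%'
 * would be misparsed as conversion specifiers if passed as the format,
 * so always pass it as a "%s" argument.
 */
char clk_name[32];

snprintf(clk_name, sizeof(clk_name), "i2c_designware.%u",
	 pci_dev_id(pdev));

clock = clkdev_create(clk, NULL, "%s", clk_name);	/* format-safe */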
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 1b4ff50d5857..42ec815159e8 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -6,10 +6,18 @@
 #include <linux/property.h>
 #include <linux/irq.h>
+#include <linux/phy.h>
+#include "../libwx/wx_type.h"
 
 /* Device IDs */
 #define TXGBE_DEV_ID_SP1000			0x1001
 #define TXGBE_DEV_ID_WX1820			0x2001
+#define TXGBE_DEV_ID_AML5010			0x5010
+#define TXGBE_DEV_ID_AML5110			0x5110
+#define TXGBE_DEV_ID_AML5025			0x5025
+#define TXGBE_DEV_ID_AML5125			0x5125
+#define TXGBE_DEV_ID_AML5040			0x5040
+#define TXGBE_DEV_ID_AML5140			0x5140
 
 /* Subsystem IDs */
 /* SFP */
@@ -44,6 +52,8 @@
 
 /**************** SP Registers ****************************/
 /* chip control Registers */
+#define TXGBE_MIS_RST				0x1000C
+#define TXGBE_MIS_RST_MAC_RST(_i)		BIT(20 - (_i) * 3)
 #define TXGBE_MIS_PRB_CTL			0x10010
 #define TXGBE_MIS_PRB_CTL_LAN_UP(_i)		BIT(1 - (_i))
 /* FMGR Registers */
@@ -56,6 +66,11 @@
 #define TXGBE_TS_CTL				0x10300
 #define TXGBE_TS_CTL_EVAL_MD			BIT(31)
 
+/* MAC Misc Registers */
+#define TXGBE_MAC_MISC_CTL			0x11F00
+#define TXGBE_MAC_MISC_CTL_LINK_STS_MOD		BIT(0)
+#define TXGBE_MAC_MISC_CTL_LINK_PCS		FIELD_PREP(BIT(0), 0)
+#define TXGBE_MAC_MISC_CTL_LINK_BOTH		FIELD_PREP(BIT(0), 1)
 /* GPIO register bit */
 #define TXGBE_GPIOBIT_0				BIT(0) /* I:tx fault */
 #define TXGBE_GPIOBIT_1				BIT(1) /* O:tx disabled */
@@ -67,20 +82,27 @@
 /* Extended Interrupt Enable Set */
 #define TXGBE_PX_MISC_ETH_LKDN			BIT(8)
 #define TXGBE_PX_MISC_DEV_RST			BIT(10)
+#define TXGBE_PX_MISC_IC_TIMESYNC		BIT(11)
 #define TXGBE_PX_MISC_ETH_EVENT			BIT(17)
 #define TXGBE_PX_MISC_ETH_LK			BIT(18)
 #define TXGBE_PX_MISC_ETH_AN			BIT(19)
 #define TXGBE_PX_MISC_INT_ERR			BIT(20)
+#define TXGBE_PX_MISC_IC_VF_MBOX		BIT(23)
 #define TXGBE_PX_MISC_GPIO			BIT(26)
 #define TXGBE_PX_MISC_IEN_MASK                            \
 	(TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \
 	 TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \
-	 TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR |   \
-	 TXGBE_PX_MISC_GPIO)
+	 TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR |   \
+	 TXGBE_PX_MISC_IC_VF_MBOX | TXGBE_PX_MISC_IC_TIMESYNC)
 
 /* Port cfg registers */
 #define TXGBE_CFG_PORT_ST			0x14404
 #define TXGBE_CFG_PORT_ST_LINK_UP		BIT(0)
+#define TXGBE_CFG_PORT_ST_LINK_AML_25G		BIT(3)
+#define TXGBE_CFG_PORT_ST_LINK_AML_10G		BIT(4)
+#define TXGBE_CFG_VXLAN				0x14410
+#define TXGBE_CFG_VXLAN_GPE			0x14414
+#define TXGBE_CFG_GENEVE			0x14418
 
 /* I2C registers */
 #define TXGBE_I2C_BASE				0x14900
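The TXGBE_CFG_VXLAN/VXLAN_GPE/GENEVE port registers above pair with the txgbe_udp_tunnels table registered in txgbe_probe(). A hypothetical set_port callback sketches how the core udp_tunnel infrastructure maps onto them; the real table and callbacks live in txgbe_main.c, so the function name and table layout here are illustrative only:

/* Hypothetical udp_tunnel_nic set_port hook: latch the offloaded tunnel
 * destination port into the matching port-config register.
 */
static int example_udp_tunnel_set_port(struct net_device *dev,
				       unsigned int table, unsigned int entry,
				       struct udp_tunnel_info *ti)
{
	struct wx *wx = netdev_priv(dev);

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		wr32(wx, TXGBE_CFG_VXLAN, be16_to_cpu(ti->port));
		break;
	case UDP_TUNNEL_TYPE_VXLAN_GPE:
		wr32(wx, TXGBE_CFG_VXLAN_GPE, be16_to_cpu(ti->port));
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		wr32(wx, TXGBE_CFG_GENEVE, be16_to_cpu(ti->port));
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}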
@@ -89,6 +111,66 @@
 #define TXGBE_XPCS_IDA_ADDR			0x13000
 #define TXGBE_XPCS_IDA_DATA			0x13004
 
+/********************************* Flow Director *****************************/
+#define TXGBE_RDB_FDIR_DROP_QUEUE		127
+#define TXGBE_RDB_FDIR_CTL			0x19500
+#define TXGBE_RDB_FDIR_CTL_INIT_DONE		BIT(3)
+#define TXGBE_RDB_FDIR_CTL_PERFECT_MATCH	BIT(4)
+#define TXGBE_RDB_FDIR_CTL_DROP_Q(v)		FIELD_PREP(GENMASK(14, 8), v)
+#define TXGBE_RDB_FDIR_CTL_HASH_BITS(v)		FIELD_PREP(GENMASK(23, 20), v)
+#define TXGBE_RDB_FDIR_CTL_MAX_LENGTH(v)	FIELD_PREP(GENMASK(27, 24), v)
+#define TXGBE_RDB_FDIR_CTL_FULL_THRESH(v)	FIELD_PREP(GENMASK(31, 28), v)
+#define TXGBE_RDB_FDIR_IP6(_i)			(0x1950C + ((_i) * 4)) /* 0-2 */
+#define TXGBE_RDB_FDIR_SA			0x19518
+#define TXGBE_RDB_FDIR_DA			0x1951C
+#define TXGBE_RDB_FDIR_PORT			0x19520
+#define TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT	16
+#define TXGBE_RDB_FDIR_FLEX			0x19524
+#define TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT		16
+#define TXGBE_RDB_FDIR_HASH			0x19528
+#define TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(v)	FIELD_PREP(GENMASK(31, 16), v)
+#define TXGBE_RDB_FDIR_HASH_BUCKET_VALID	BIT(15)
+#define TXGBE_RDB_FDIR_CMD			0x1952C
+#define TXGBE_RDB_FDIR_CMD_CMD_MASK		GENMASK(1, 0)
+#define TXGBE_RDB_FDIR_CMD_CMD(v)		FIELD_PREP(GENMASK(1, 0), v)
+#define TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW		TXGBE_RDB_FDIR_CMD_CMD(1)
+#define TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW	TXGBE_RDB_FDIR_CMD_CMD(2)
+#define TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT	TXGBE_RDB_FDIR_CMD_CMD(3)
+#define TXGBE_RDB_FDIR_CMD_FILTER_VALID		BIT(2)
+#define TXGBE_RDB_FDIR_CMD_FILTER_UPDATE	BIT(3)
+#define TXGBE_RDB_FDIR_CMD_FLOW_TYPE(v)		FIELD_PREP(GENMASK(6, 5), v)
+#define TXGBE_RDB_FDIR_CMD_DROP			BIT(9)
+#define TXGBE_RDB_FDIR_CMD_LAST			BIT(11)
+#define TXGBE_RDB_FDIR_CMD_QUEUE_EN		BIT(15)
+#define TXGBE_RDB_FDIR_CMD_RX_QUEUE(v)		FIELD_PREP(GENMASK(22, 16), v)
+#define TXGBE_RDB_FDIR_CMD_VT_POOL(v)		FIELD_PREP(GENMASK(29, 24), v)
+#define TXGBE_RDB_FDIR_DA4_MSK			0x1953C
+#define TXGBE_RDB_FDIR_SA4_MSK			0x19540
+#define TXGBE_RDB_FDIR_TCP_MSK			0x19544
+#define TXGBE_RDB_FDIR_UDP_MSK			0x19548
+#define TXGBE_RDB_FDIR_SCTP_MSK			0x19560
+#define TXGBE_RDB_FDIR_HKEY			0x19568
+#define TXGBE_RDB_FDIR_SKEY			0x1956C
+#define TXGBE_RDB_FDIR_OTHER_MSK		0x19570
+#define TXGBE_RDB_FDIR_OTHER_MSK_POOL		BIT(2)
+#define TXGBE_RDB_FDIR_OTHER_MSK_L4P		BIT(3)
+#define TXGBE_RDB_FDIR_FLEX_CFG(_i)		(0x19580 + ((_i) * 4))
+#define TXGBE_RDB_FDIR_FLEX_CFG_FIELD0		GENMASK(7, 0)
+#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC	FIELD_PREP(GENMASK(1, 0), 0)
+#define TXGBE_RDB_FDIR_FLEX_CFG_MSK		BIT(2)
+#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v)		FIELD_PREP(GENMASK(7, 3), v)
+
+/*************************** Amber Lite Registers ****************************/
+#define TXGBE_PX_PF_BME				0x4B8
+#define TXGBE_AML_MAC_TX_CFG			0x11000
+#define TXGBE_AML_MAC_TX_CFG_TE			BIT(0)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_MASK		GENMASK(30, 27)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_40G		FIELD_PREP(GENMASK(30, 27), 0)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_25G		FIELD_PREP(GENMASK(30, 27), 2)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_10G		FIELD_PREP(GENMASK(30, 27), 8)
+#define TXGBE_RDM_RSC_CTL			0x1200C
+#define TXGBE_RDM_RSC_CTL_FREE_CTL		BIT(7)
+
 /* Checksum and EEPROM pointers */
 #define TXGBE_EEPROM_LAST_WORD			0x800
 #define TXGBE_EEPROM_CHECKSUM			0x2F
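The flow-director control fields above are declared with FIELD_PREP(), so a bring-up value is composed by OR-ing the field macros. A hypothetical sketch using only the fields declared above; the drop queue constant is real, the hash-bits and threshold values are illustrative rather than the driver's actual tuning, wr32()/rd32() are the libwx MMIO accessors, and a production path would bound the INIT_DONE wait:

/* Hypothetical flow-director bring-up in perfect-match mode. */
u32 fdirctl;

fdirctl = TXGBE_RDB_FDIR_CTL_PERFECT_MATCH |
	  TXGBE_RDB_FDIR_CTL_DROP_Q(TXGBE_RDB_FDIR_DROP_QUEUE) |
	  TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) |
	  TXGBE_RDB_FDIR_CTL_FULL_THRESH(4);

wr32(wx, TXGBE_RDB_FDIR_CTL, fdirctl);

/* hardware latches INIT_DONE once the filter table is initialized */
while (!(rd32(wx, TXGBE_RDB_FDIR_CTL) & TXGBE_RDB_FDIR_CTL_INIT_DONE))
	usleep_range(100, 200);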
@@ -96,6 +178,8 @@
 #define TXGBE_EEPROM_VERSION_L			0x1D
 #define TXGBE_EEPROM_VERSION_H			0x1E
 #define TXGBE_ISCSI_BOOT_CONFIG			0x07
+#define TXGBE_EEPROM_I2C_SRART_PTR		0x580
+#define TXGBE_EEPROM_I2C_END_PTR		0x800
 
 #define TXGBE_MAX_MSIX_VECTORS			64
 #define TXGBE_MAX_FDIR_INDICES			63
@@ -104,13 +188,107 @@
 #define TXGBE_MAX_RX_QUEUES   (TXGBE_MAX_FDIR_INDICES + 1)
 #define TXGBE_MAX_TX_QUEUES   (TXGBE_MAX_FDIR_INDICES + 1)
 
-#define TXGBE_SP_MAX_TX_QUEUES	128
-#define TXGBE_SP_MAX_RX_QUEUES	128
-#define TXGBE_SP_RAR_ENTRIES	128
-#define TXGBE_SP_MC_TBL_SIZE	128
-#define TXGBE_SP_VFT_TBL_SIZE	128
-#define TXGBE_SP_RX_PB_SIZE	512
-#define TXGBE_SP_TDB_PB_SZ	(160 * 1024) /* 160KB Packet Buffer */
+#define TXGBE_MAX_TXQ			128
+#define TXGBE_MAX_RXQ			128
+#define TXGBE_RAR_ENTRIES		128
+#define TXGBE_MC_TBL_SIZE		128
+#define TXGBE_VFT_TBL_SIZE		128
+#define TXGBE_RX_PB_SIZE		512
+#define TXGBE_TDB_PB_SZ			(160 * 1024) /* 160KB Packet Buffer */
+
+#define TXGBE_MAX_VFS_DRV_LIMIT		63
+
+#define TXGBE_DEFAULT_ATR_SAMPLE_RATE	20
+
+/* Software ATR hash keys */
+#define TXGBE_ATR_BUCKET_HASH_KEY	0x3DAD14E2
+#define TXGBE_ATR_SIGNATURE_HASH_KEY	0x174D3614
+
+/* Software ATR input stream values and masks */
+#define TXGBE_ATR_HASH_MASK		0x7fff
+#define TXGBE_ATR_L4TYPE_MASK		0x3
+#define TXGBE_ATR_L4TYPE_UDP		0x1
+#define TXGBE_ATR_L4TYPE_TCP		0x2
+#define TXGBE_ATR_L4TYPE_SCTP		0x3
+#define TXGBE_ATR_L4TYPE_IPV6_MASK	0x4
+#define TXGBE_ATR_L4TYPE_TUNNEL_MASK	0x10
+
+enum txgbe_atr_flow_type {
+	TXGBE_ATR_FLOW_TYPE_IPV4		= 0x0,
+	TXGBE_ATR_FLOW_TYPE_UDPV4		= 0x1,
+	TXGBE_ATR_FLOW_TYPE_TCPV4		= 0x2,
+	TXGBE_ATR_FLOW_TYPE_SCTPV4		= 0x3,
+	TXGBE_ATR_FLOW_TYPE_IPV6		= 0x4,
+	TXGBE_ATR_FLOW_TYPE_UDPV6		= 0x5,
+	TXGBE_ATR_FLOW_TYPE_TCPV6		= 0x6,
+	TXGBE_ATR_FLOW_TYPE_SCTPV6		= 0x7,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4	= 0x10,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4	= 0x11,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4	= 0x12,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4	= 0x13,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6	= 0x14,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6	= 0x15,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6	= 0x16,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6	= 0x17,
+};
+
+/* Flow Director ATR input struct. */
+union txgbe_atr_input {
+	/* Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool	- 1 byte
+	 * flow_type	- 1 byte
+	 * vlan_id	- 2 bytes
+	 * dst_ip	- 16 bytes
+	 * src_ip	- 16 bytes
+	 * src_port	- 2 bytes
+	 * dst_port	- 2 bytes
+	 * flex_bytes	- 2 bytes
+	 * bkt_hash	- 2 bytes
+	 */
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 src_ip[4];
+		__be16 src_port;
+		__be16 dst_port;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} formatted;
+	__be32 dword_stream[11];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union txgbe_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+enum txgbe_fdir_pballoc_type {
+	TXGBE_FDIR_PBALLOC_NONE = 0,
+	TXGBE_FDIR_PBALLOC_64K  = 1,
+	TXGBE_FDIR_PBALLOC_128K = 2,
+	TXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+struct txgbe_fdir_filter {
+	struct hlist_node fdir_node;
+	union txgbe_atr_input filter;
+	u16 sw_idx;
+	u64 action;
+};
 
 /* TX/RX descriptor defines */
 #define TXGBE_DEFAULT_TXD		512
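To make the union layout above concrete, a hypothetical sketch of staging a perfect-match TCP/IPv4 rule before it is programmed into hardware; using only the first dword of each address array for IPv4 is an assumption mirrored from similar flow-director layouts, and the addresses and ports are illustrative:

/* Hypothetical: stage a TCPv4 perfect-match rule in txgbe_atr_input. */
union txgbe_atr_input rule = {};

rule.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
rule.formatted.src_ip[0] = cpu_to_be32(0xc0a80001);	/* 192.168.0.1 */
rule.formatted.dst_ip[0] = cpu_to_be32(0xc0a80002);	/* 192.168.0.2 */
rule.formatted.src_port  = cpu_to_be16(12345);
rule.formatted.dst_port  = cpu_to_be16(80);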
@@ -134,6 +312,73 @@
 extern char txgbe_driver_name[];
 void txgbe_down(struct wx *wx);
 void txgbe_up(struct wx *wx);
 int txgbe_setup_tc(struct net_device *dev, u8 tc);
+void txgbe_do_reset(struct net_device *netdev);
+
+#define TXGBE_LINK_SPEED_10GB_FULL	4
+#define TXGBE_LINK_SPEED_25GB_FULL	0x10
+
+#define TXGBE_SFF_IDENTIFIER_SFP	0x3
+#define TXGBE_SFF_DA_PASSIVE_CABLE	0x4
+#define TXGBE_SFF_DA_ACTIVE_CABLE	0x8
+#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMIT	0x4
+#define TXGBE_SFF_FCPI4_LIMITING	0x3
+#define TXGBE_SFF_10GBASESR_CAPABLE	0x10
+#define TXGBE_SFF_10GBASELR_CAPABLE	0x20
+#define TXGBE_SFF_25GBASESR_CAPABLE	0x2
+#define TXGBE_SFF_25GBASELR_CAPABLE	0x3
+#define TXGBE_SFF_25GBASEER_CAPABLE	0x4
+#define TXGBE_SFF_25GBASECR_91FEC	0xB
+#define TXGBE_SFF_25GBASECR_74FEC	0xC
+#define TXGBE_SFF_25GBASECR_NOFEC	0xD
+
+#define TXGBE_PHY_FEC_RS		BIT(0)
+#define TXGBE_PHY_FEC_BASER		BIT(1)
+#define TXGBE_PHY_FEC_OFF		BIT(2)
+#define TXGBE_PHY_FEC_AUTO		(TXGBE_PHY_FEC_OFF | \
+					 TXGBE_PHY_FEC_BASER |\
+					 TXGBE_PHY_FEC_RS)
+
+#define FW_PHY_GET_LINK_CMD		0xC0
+#define FW_PHY_SET_LINK_CMD		0xC1
+#define FW_READ_SFP_INFO_CMD		0xC5
+
+struct txgbe_sfp_id {
+	u8 identifier;		/* A0H 0x00 */
+	u8 com_1g_code;		/* A0H 0x06 */
+	u8 com_10g_code;	/* A0H 0x03 */
+	u8 com_25g_code;	/* A0H 0x24 */
+	u8 cable_spec;		/* A0H 0x3C */
+	u8 cable_tech;		/* A0H 0x08 */
+	u8 vendor_oui0;		/* A0H 0x25 */
+	u8 vendor_oui1;		/* A0H 0x26 */
+	u8 vendor_oui2;		/* A0H 0x27 */
+	u8 reserved[3];
+};
+
+struct txgbe_hic_i2c_read {
+	struct wx_hic_hdr hdr;
+	struct txgbe_sfp_id id;
+};
+
+struct txgbe_hic_ephy_setlink {
+	struct wx_hic_hdr hdr;
+	u8 speed;
+	u8 duplex;
+	u8 autoneg;
+	u8 fec_mode;
+	u8 resv[4];
+};
+
+struct txgbe_hic_ephy_getlink {
+	struct wx_hic_hdr hdr;
+	u8 speed;
+	u8 duplex;
+	u8 autoneg;
+	u8 flow_ctl;
+	u8 power;
+	u8 fec_mode;
+	u8 resv[6];
+};
 
 #define NODE_PROP(_NAME, _PROP)				\
 	(const struct software_node) {			\
@@ -171,8 +416,8 @@ struct txgbe_nodes {
 };
 
 enum txgbe_misc_irqs {
-	TXGBE_IRQ_GPIO = 0,
-	TXGBE_IRQ_LINK,
+	TXGBE_IRQ_LINK = 0,
+	TXGBE_IRQ_GPIO,
 	TXGBE_IRQ_MAX
 };
 
@@ -187,14 +432,26 @@ struct txgbe {
 	struct wx *wx;
 	struct txgbe_nodes nodes;
 	struct txgbe_irq misc;
-	struct dw_xpcs *xpcs;
+	struct phylink_pcs *pcs;
 	struct platform_device *sfp_dev;
 	struct platform_device *i2c_dev;
 	struct clk_lookup *clock;
 	struct clk *clk;
 	struct gpio_chip *gpio;
-	unsigned int gpio_irq;
 	unsigned int link_irq;
+	unsigned int gpio_irq;
+	u32 eicr;
+
+	/* flow director */
+	struct hlist_head fdir_filter_list;
+	union txgbe_atr_input fdir_mask;
+	int fdir_filter_count;
+	spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
+
+	DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+	u8 link_port;
 };
 
 #endif /* _TXGBE_TYPE_H_ */
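struct txgbe now carries a software shadow of the perfect-match filters: fdir_filter_list, fdir_filter_count and fdir_perfect_lock. A hypothetical sketch of maintaining that list sorted by sw_idx; the helper name is illustrative, duplicate-index replacement is omitted for brevity, and only the fields declared in txgbe_type.h above are assumed:

/* Hypothetical: insert a filter into the sw_idx-ordered shadow list. */
static int example_fdir_insert(struct txgbe *txgbe,
			       struct txgbe_fdir_filter *input)
{
	struct txgbe_fdir_filter *rule, *parent = NULL;

	spin_lock(&txgbe->fdir_perfect_lock);

	hlist_for_each_entry(rule, &txgbe->fdir_filter_list, fdir_node) {
		if (rule->sw_idx >= input->sw_idx)
			break;
		parent = rule;
	}

	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node, &txgbe->fdir_filter_list);

	txgbe->fdir_filter_count++;

	spin_unlock(&txgbe->fdir_perfect_lock);

	return 0;
}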