Diffstat (limited to 'drivers/net/ethernet/amd/xgbe/xgbe-dev.c')
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 781
 1 file changed, 421 insertions(+), 360 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 06f953e1e9b2..b646ae575e6a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
 /*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
- *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- *     Inc. unless otherwise expressly agreed to in writing between Synopsys
- *     and you.
- *
- *     The Software IS NOT an item of Licensed Software or Licensed Product
- *     under any End User Software License Agreement or Agreement for Licensed
- *     Product with Synopsys or any supplement thereto. Permission is hereby
- *     granted, free of charge, to any person obtaining a copy of this software
- *     annotated with this license and the Software, to deal in the Software
- *     without restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- *     of the Software, and to permit persons to whom the Software is furnished
- *     to do so, subject to the following conditions:
- *
- *     The above copyright notice and this permission notice shall be included
- *     in all copies or substantial portions of the Software.
- *
- *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- *     THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of Advanced Micro Devices, Inc. nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
- *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- *     Inc. unless otherwise expressly agreed to in writing between Synopsys
- *     and you.
- *
- *     The Software IS NOT an item of Licensed Software or Licensed Product
- *     under any End User Software License Agreement or Agreement for Licensed
- *     Product with Synopsys or any supplement thereto. Permission is hereby
- *     granted, free of charge, to any person obtaining a copy of this software
- *     annotated with this license and the Software, to deal in the Software
- *     without restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- *     of the Software, and to permit persons to whom the Software is furnished
- *     to do so, subject to the following conditions:
- *
- *     The above copyright notice and this permission notice shall be included
- *     in all copies or substantial portions of the Software.
- *
- *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- *     THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
  */
 
 #include <linux/phy.h>
@@ -119,9 +10,12 @@
 #include <linux/clk.h>
 #include <linux/bitrev.h>
 #include <linux/crc32.h>
+#include <linux/crc32poly.h>
+#include <linux/pci.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
+#include "xgbe-smn.h"
 
 static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
 {
@@ -317,6 +211,20 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
 	}
 
 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+	pdata->sph = true;
+}
+
+static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->channel_count; i++) {
+		if (!pdata->channel[i]->rx_ring)
+			break;
+
+		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
+	}
+	pdata->sph = false;
 }
 
 static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
@@ -479,19 +387,72 @@ static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
 	return false;
 }
 
+static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata)
+{
+	/* Program the VXLAN port */
+	XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port);
+
+	netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n",
+		  pdata->vxlan_port);
+}
+
+static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->hw_feat.vxn)
+		return;
+
+	/* Program the VXLAN port */
+	xgbe_set_vxlan_id(pdata);
+
+	/* Allow for IPv6/UDP zero-checksum VXLAN packets */
+	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1);
+
+	/* Enable VXLAN tunneling mode */
+	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0);
+	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1);
+
+	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n");
+}
+
+static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->hw_feat.vxn)
+		return;
+
+	/* Disable tunneling mode */
+	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0);
+
+	/* Clear IPv6/UDP zero-checksum VXLAN packets setting */
+	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0);
+
+	/* Clear the VXLAN port */
+	XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0);
+
+	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
+}
+
+static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
+{
+	unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+
+	/* From MAC ver 30H the TFCR is per priority, instead of per queue */
+	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
+		return max_q_count;
+	else
+		return min_t(unsigned int, pdata->tx_q_count, max_q_count);
+}
+
 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
-	unsigned int max_q_count, q_count;
 	unsigned int reg, reg_val;
-	unsigned int i;
+	unsigned int i, q_count;
 
 	/* Clear MTL flow control */
 	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
 
 	/* Clear MAC flow control */
-	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+	q_count = xgbe_get_fc_queue_count(pdata);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -508,9 +469,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
 	struct ieee_pfc *pfc = pdata->pfc;
 	struct ieee_ets *ets = pdata->ets;
-	unsigned int max_q_count, q_count;
 	unsigned int reg, reg_val;
-	unsigned int i;
+	unsigned int i, q_count;
 
 	/* Set MTL flow control */
 	for (i = 0; i < pdata->rx_q_count; i++) {
@@ -534,8 +494,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 	}
 
 	/* Set MAC flow control */
-	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+	q_count = xgbe_get_fc_queue_count(pdata);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -605,32 +564,38 @@ static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_channel *channel;
-	unsigned int dma_ch_isr, dma_ch_ier;
-	unsigned int i;
+	unsigned int i, ver;
 
 	/* Set the interrupt mode if supported */
 	if (pdata->channel_irq_mode)
 		XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
 				   pdata->channel_irq_mode);
 
+	ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+
 	for (i = 0; i < pdata->channel_count; i++) {
 		channel = pdata->channel[i];
 
 		/* Clear all the interrupts which are set */
-		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
-		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
+				  XGMAC_DMA_IOREAD(channel, DMA_CH_SR));
 
 		/* Clear all interrupt enable bits */
-		dma_ch_ier = 0;
+		channel->curr_ier = 0;
 
 		/* Enable following interrupts
 		 *   NIE  - Normal Interrupt Summary Enable
 		 *   AIE  - Abnormal Interrupt Summary Enable
 		 *   FBEE - Fatal Bus Error Enable
 		 */
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+		if (ver < 0x21) {
+			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1);
+			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1);
+		} else {
+			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
+			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
+		}
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
 
 		if (channel->tx_ring) {
 			/* Enable the following Tx interrupts
@@ -639,7 +604,8 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
 			 *   mode)
 			 */
 			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
-				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+				XGMAC_SET_BITS(channel->curr_ier,
					       DMA_CH_IER, TIE, 1);
 		}
 		if (channel->rx_ring) {
 			/* Enable following Rx interrupts
@@ -648,12 +614,13 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
 			 *   per channel interrupts in edge triggered
 			 *   mode)
 			 */
-			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
 			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
-				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+				XGMAC_SET_BITS(channel->curr_ier,
					       DMA_CH_IER, RIE, 1);
 		}
 
-		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
 	}
 }
 
@@ -754,6 +721,9 @@ static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
 	unsigned int ss;
 
 	switch (speed) {
+	case SPEED_10:
+		ss = 0x07;
+		break;
 	case SPEED_1000:
 		ss = 0x03;
 		break;
@@ -835,7 +805,6 @@ static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
 
 static u32 xgbe_vid_crc32_le(__le16 vid_le)
 {
-	u32 poly = 0xedb88320;	/* CRCPOLY_LE */
 	u32 crc = ~0;
 	u32 temp = 0;
 	unsigned char *data = (unsigned char *)&vid_le;
@@ -852,7 +821,7 @@ static u32 xgbe_vid_crc32_le(__le16 vid_le)
 		data_byte >>= 1;
 
 		if (temp)
-			crc ^= poly;
+			crc ^= CRC32_POLY_LE;
 	}
 
 	return crc;
@@ -1028,7 +997,7 @@ static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
 	return 0;
 }
 
-static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr)
 {
 	unsigned int mac_addr_hi, mac_addr_lo;
 
@@ -1088,18 +1057,19 @@ static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
 	return 0;
 }
 
-static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
-				 int mmd_reg)
+static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata,
+					 int mmd_reg)
 {
-	unsigned long flags;
-	unsigned int mmd_address, index, offset;
-	int mmd_data;
-
-	if (mmd_reg & MII_ADDR_C45)
-		mmd_address = mmd_reg & ~MII_ADDR_C45;
-	else
-		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+	return (mmd_reg & XGBE_ADDR_C45) ?
+		mmd_reg & ~XGBE_ADDR_C45 :
+		(pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+}
 
+static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
+					  unsigned int mmd_address,
+					  unsigned int *index,
+					  unsigned int *offset)
+{
 	/* The PCS registers are accessed using mmio. The underlying
 	 * management interface uses indirect addressing to access the MMD
 	 * register sets. This requires accessing of the PCS register in two
@@ -1110,8 +1080,98 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
 	 * offset 1 bit and reading 16 bits of data.
 	 */
 	mmd_address <<= 1;
-	index = mmd_address & ~pdata->xpcs_window_mask;
-	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+	*index = mmd_address & ~pdata->xpcs_window_mask;
+	*offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+}
+
+static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+				 int mmd_reg)
+{
+	unsigned int mmd_address, index, offset;
+	u32 smn_address;
+	int mmd_data;
+	int ret;
+
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+	xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+	smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+	ret = amd_smn_write(0, smn_address, index);
+	if (ret)
+		return ret;
+
+	ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
+	if (ret)
+		return ret;
+
+	mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
+				  FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+
+	return mmd_data;
+}
+
+static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+				   int mmd_reg, int mmd_data)
+{
+	unsigned int pci_mmd_data, hi_mask, lo_mask;
+	unsigned int mmd_address, index, offset;
+	struct pci_dev *dev;
+	u32 smn_address;
+	int ret;
+
+	dev = pdata->pcidev;
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+	xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+	smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+	ret = amd_smn_write(0, smn_address, index);
+	if (ret) {
+		pci_err(dev, "Failed to write data 0x%x\n", index);
+		return;
+	}
+
+	ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data);
+	if (ret) {
+		pci_err(dev, "Failed to read data\n");
+		return;
+	}
+
+	if (offset % 4) {
+		hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data);
+		lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data);
+	} else {
+		hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK,
+				     FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data));
+		lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+	}
+
+	pci_mmd_data = hi_mask | lo_mask;
+
+	ret = amd_smn_write(0, smn_address, index);
+	if (ret) {
+		pci_err(dev, "Failed to write data 0x%x\n", index);
+		return;
+	}
+
+	ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data);
+	if (ret) {
+		pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data);
+		return;
+	}
+}
+
+static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+				 int mmd_reg)
+{
+	unsigned int mmd_address, index, offset;
+	unsigned long flags;
+	int mmd_data;
+
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+	xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
 
 	spin_lock_irqsave(&pdata->xpcs_lock, flags);
 	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1127,23 +1187,9 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
 	unsigned long flags;
 	unsigned int mmd_address, index, offset;
 
-	if (mmd_reg & MII_ADDR_C45)
-		mmd_address = mmd_reg & ~MII_ADDR_C45;
-	else
-		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
 
-	/* The PCS registers are accessed using mmio. The underlying
-	 * management interface uses indirect addressing to access the MMD
-	 * register sets. This requires accessing of the PCS register in two
-	 * phases, an address phase and a data phase.
-	 *
-	 * The mmio interface is based on 16-bit offsets and values. All
-	 * register offsets must therefore be adjusted by left shifting the
-	 * offset 1 bit and writing 16 bits of data.
-	 */
-	mmd_address <<= 1;
-	index = mmd_address & ~pdata->xpcs_window_mask;
-	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+	xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
 
 	spin_lock_irqsave(&pdata->xpcs_lock, flags);
 	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1158,10 +1204,7 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
 	unsigned int mmd_address;
 	int mmd_data;
 
-	if (mmd_reg & MII_ADDR_C45)
-		mmd_address = mmd_reg & ~MII_ADDR_C45;
-	else
-		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
 
 	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
@@ -1186,10 +1229,7 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
 	unsigned int mmd_address;
 	unsigned long flags;
 
-	if (mmd_reg & MII_ADDR_C45)
-		mmd_address = mmd_reg & ~MII_ADDR_C45;
-	else
-		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
 
 	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
@@ -1216,6 +1256,9 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 	case XGBE_XPCS_ACCESS_V2:
 	default:
 		return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+
+	case XGBE_XPCS_ACCESS_V3:
+		return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);
 	}
 }
 
@@ -1226,22 +1269,45 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 	case XGBE_XPCS_ACCESS_V1:
 		return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
 
+	case XGBE_XPCS_ACCESS_V3:
+		return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);
+
 	case XGBE_XPCS_ACCESS_V2:
 	default:
 		return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
 	}
 }
 
-static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
-				   int reg, u16 val)
+static unsigned int xgbe_create_mdio_sca_c22(int port, int reg)
 {
-	unsigned int mdio_sca, mdio_sccd;
+	unsigned int mdio_sca;
 
-	reinit_completion(&pdata->mdio_complete);
+	mdio_sca = 0;
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
+
+	return mdio_sca;
+}
+
+static unsigned int xgbe_create_mdio_sca_c45(int port, unsigned int da, int reg)
+{
+	unsigned int mdio_sca;
 
 	mdio_sca = 0;
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
+
+	return mdio_sca;
+}
+
+static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata,
+				   unsigned int mdio_sca, u16 val)
+{
+	unsigned int mdio_sccd;
+
+	reinit_completion(&pdata->mdio_complete);
+
 	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
 
 	mdio_sccd = 0;
@@ -1258,16 +1324,33 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
 	return 0;
 }
 
-static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
-				  int reg)
+static int xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
+				       int reg, u16 val)
+{
+	unsigned int mdio_sca;
+
+	mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
+
+	return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
+}
+
+static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
+				       int devad, int reg, u16 val)
+{
+	unsigned int mdio_sca;
+
+	mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
+
+	return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
+}
+
+static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata,
+				  unsigned int mdio_sca)
 {
-	unsigned int mdio_sca, mdio_sccd;
+	unsigned int mdio_sccd;
 
 	reinit_completion(&pdata->mdio_complete);
 
-	mdio_sca = 0;
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
 	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
 
 	mdio_sccd = 0;
@@ -1283,6 +1366,26 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
 	return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
 }
 
+static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
+				      int reg)
+{
+	unsigned int mdio_sca;
+
+	mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
+
+	return xgbe_read_ext_mii_regs(pdata, mdio_sca);
+}
+
+static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
+				      int devad, int reg)
+{
+	unsigned int mdio_sca;
+
+	mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
+
+	return xgbe_read_ext_mii_regs(pdata, mdio_sca);
+}
+
 static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
 				 enum xgbe_mdio_mode mode)
 {
@@ -1457,125 +1560,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 	DBGPR("<--rx_desc_init\n");
 }
 
-static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
-				      unsigned int addend)
-{
-	unsigned int count = 10000;
-
-	/* Set the addend register value and tell the device */
-	XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
-	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
-
-	/* Wait for addend update to complete */
-	while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
-		udelay(5);
-
-	if (!count)
-		netdev_err(pdata->netdev,
-			   "timed out updating timestamp addend register\n");
-}
-
-static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
-				 unsigned int nsec)
-{
-	unsigned int count = 10000;
-
-	/* Set the time values and tell the device */
-	XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
-	XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
-	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
-
-	/* Wait for time update to complete */
-	while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
-		udelay(5);
-
-	if (!count)
-		netdev_err(pdata->netdev, "timed out initializing timestamp\n");
-}
-
-static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
-{
-	u64 nsec;
-
-	nsec = XGMAC_IOREAD(pdata, MAC_STSR);
-	nsec *= NSEC_PER_SEC;
-	nsec += XGMAC_IOREAD(pdata, MAC_STNR);
-
-	return nsec;
-}
-
-static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
-{
-	unsigned int tx_snr, tx_ssr;
-	u64 nsec;
-
-	if (pdata->vdata->tx_tstamp_workaround) {
-		tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
-		tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
-	} else {
-		tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
-		tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
-	}
-
-	if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
-		return 0;
-
-	nsec = tx_ssr;
-	nsec *= NSEC_PER_SEC;
-	nsec += tx_snr;
-
-	return nsec;
-}
-
-static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
-			       struct xgbe_ring_desc *rdesc)
-{
-	u64 nsec;
-
-	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
-	    !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
-		nsec = le32_to_cpu(rdesc->desc1);
-		nsec <<= 32;
-		nsec |= le32_to_cpu(rdesc->desc0);
-		if (nsec != 0xffffffffffffffffULL) {
-			packet->rx_tstamp = nsec;
-			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-				       RX_TSTAMP, 1);
-		}
-	}
-}
-
-static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
-			      unsigned int mac_tscr)
-{
-	/* Set one nano-second accuracy */
-	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
-
-	/* Set fine timestamp update */
-	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
-
-	/* Overwrite earlier timestamps */
-	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
-
-	XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
-
-	/* Exit if timestamping is not enabled */
-	if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
-		return 0;
-
-	/* Initialize time registers */
-	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
-	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
-	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
-	xgbe_set_tstamp_time(pdata, 0, 0);
-
-	/* Initialize the timecounter */
-	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
-			 ktime_to_ns(ktime_get_real()));
-
-	return 0;
-}
-
 static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
 			       struct xgbe_ring *ring)
 {
@@ -1608,7 +1592,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	struct xgbe_ring_data *rdata;
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_packet_data *packet = &ring->packet_data;
-	unsigned int csum, tso, vlan;
+	unsigned int tx_packets, tx_bytes;
+	unsigned int csum, tso, vlan, vxlan;
 	unsigned int tso_context, vlan_context;
 	unsigned int tx_set_ic;
 	int start_index = ring->cur;
@@ -1617,12 +1602,17 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 
 	DBGPR("-->xgbe_dev_xmit\n");
 
+	tx_packets = packet->tx_packets;
+	tx_bytes = packet->tx_bytes;
+
 	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
 			      CSUM_ENABLE);
 	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
 			     TSO_ENABLE);
 	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
 			      VLAN_CTAG);
+	vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VXLAN);
 
 	if (tso && (packet->mss != ring->tx.cur_mss))
 		tso_context = 1;
@@ -1644,13 +1634,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
	 *   - Addition of Tx frame count to the frame count since the
	 *     last interrupt was set does not exceed the frame count setting
	 */
-	ring->coalesce_count += packet->tx_packets;
+	ring->coalesce_count += tx_packets;
 	if (!pdata->tx_frames)
 		tx_set_ic = 0;
-	else if (packet->tx_packets > pdata->tx_frames)
+	else if (tx_packets > pdata->tx_frames)
 		tx_set_ic = 1;
-	else if ((ring->coalesce_count % pdata->tx_frames) <
-		 packet->tx_packets)
+	else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
 		tx_set_ic = 1;
 	else
 		tx_set_ic = 0;
@@ -1740,7 +1729,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
 					  packet->tcp_header_len / 4);
 
-		pdata->ext_stats.tx_tso_packets++;
+		pdata->ext_stats.tx_tso_packets += tx_packets;
 	} else {
 		/* Enable CRC and Pad Insertion */
 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
@@ -1755,6 +1744,13 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 				  packet->length);
 	}
 
+	if (vxlan) {
+		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP,
+				  TX_NORMAL_DESC3_VXLAN_PACKET);
+
+		pdata->ext_stats.tx_vxlan_packets += packet->tx_packets;
+	}
+
 	for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
 		cur_index++;
 		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
@@ -1788,8 +1784,11 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
 
 	/* Save the Tx info to report back during cleanup */
-	rdata->tx.packets = packet->tx_packets;
-	rdata->tx.bytes = packet->tx_bytes;
+	rdata->tx.packets = tx_packets;
+	rdata->tx.bytes = tx_bytes;
+
+	pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets;
+	pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes;
 
 	/* In case the Tx DMA engine is running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
@@ -1810,7 +1809,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	smp_wmb();
 
 	ring->cur = cur_index + 1;
-	if (!packet->skb->xmit_more ||
+	if (!netdev_xmit_more() ||
 	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
 						   channel->queue_index)))
 		xgbe_tx_start_xmit(channel, ring);
@@ -1913,9 +1912,28 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
 	/* Set checksum done indicator as appropriate */
-	if (netdev->features & NETIF_F_RXCSUM)
+	if (netdev->features & NETIF_F_RXCSUM) {
 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
 			       CSUM_DONE, 1);
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       TNPCSUM_DONE, 1);
+	}
+
+	/* Set the tunneled packet indicator */
+	if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       TNP, 1);
+		pdata->ext_stats.rx_vxlan_packets++;
+
+		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+		switch (l34t) {
+		case RX_DESC3_L34T_IPV4_UNKNOWN:
+		case RX_DESC3_L34T_IPV6_UNKNOWN:
+			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+				       TNPCSUM_DONE, 0);
+			break;
+		}
+	}
 
 	/* Check for errors (only valid in last descriptor) */
 	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
@@ -1935,14 +1953,30 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 					       packet->vlan_ctag);
 		}
 	} else {
-		if ((etlt == 0x05) || (etlt == 0x06))
+		unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
+						  RX_PACKET_ATTRIBUTES, TNP);
+
+		if ((etlt == 0x05) || (etlt == 0x06)) {
 			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
 				       CSUM_DONE, 0);
-		else
+			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+				       TNPCSUM_DONE, 0);
+			pdata->ext_stats.rx_csum_errors++;
+		} else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
+			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+				       CSUM_DONE, 0);
+			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+				       TNPCSUM_DONE, 0);
+			pdata->ext_stats.rx_vxlan_csum_errors++;
+		} else {
 			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
 				       FRAME, 1);
+		}
 	}
 
+	pdata->ext_stats.rxq_packets[channel->queue_index]++;
+	pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len;
+
 	DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
 	      ring->cur & (ring->rdesc_count - 1), ring->cur);
 
@@ -1964,44 +1998,40 @@ static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
 static int xgbe_enable_int(struct xgbe_channel *channel,
 			   enum xgbe_int int_id)
 {
-	unsigned int dma_ch_ier;
-
-	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-
 	switch (int_id) {
 	case XGMAC_INT_DMA_CH_SR_TI:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TPS:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TBU:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RI:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RBU:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RPS:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TI_RI:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_FBE:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
 		break;
 	case XGMAC_INT_DMA_ALL:
-		dma_ch_ier |= channel->saved_ier;
+		channel->curr_ier |= channel->saved_ier;
 		break;
 	default:
 		return -1;
 	}
 
-	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
 
 	return 0;
 }
@@ -2009,45 +2039,41 @@ static int xgbe_enable_int(struct xgbe_channel *channel,
 static int xgbe_disable_int(struct xgbe_channel *channel,
 			    enum xgbe_int int_id)
 {
-	unsigned int dma_ch_ier;
-
-	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-
 	switch (int_id) {
 	case XGMAC_INT_DMA_CH_SR_TI:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TPS:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TBU:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RI:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RBU:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RPS:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TI_RI:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_FBE:
-		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
+		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
 		break;
 	case XGMAC_INT_DMA_ALL:
-		channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
-		dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
+		channel->saved_ier = channel->curr_ier;
+		channel->curr_ier = 0;
 		break;
 	default:
 		return -1;
 	}
 
-	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
 
 	return 0;
 }
@@ -2707,9 +2733,19 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
 {
 	unsigned int val;
 
-	val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
-
-	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+	if (pdata->netdev->mtu > XGMAC_JUMBO_PACKET_MTU) {
+		XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSL,
+				   XGMAC_GIANT_PACKET_MTU);
+		XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 1);
+		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 1);
+		XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 1);
+	} else {
+		val = pdata->netdev->mtu > XGMAC_STD_PACKET_MTU ? 1 : 0;
+		XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 0);
+		XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 0);
+		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 0);
+		XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+	}
 }
 
 static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
@@ -3384,8 +3420,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 	xgbe_config_tx_coalesce(pdata);
 	xgbe_config_rx_buffer_size(pdata);
 	xgbe_config_tso_mode(pdata);
-	xgbe_config_sph_mode(pdata);
-	xgbe_config_rss(pdata);
+
+	if (pdata->netdev->features & NETIF_F_RXCSUM) {
+		xgbe_config_sph_mode(pdata);
+		xgbe_config_rss(pdata);
+	}
+
 	desc_if->wrapper_tx_desc_init(pdata);
 	desc_if->wrapper_rx_desc_init(pdata);
 	xgbe_enable_dma_interrupts(pdata);
@@ -3454,8 +3494,10 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 	hw_if->set_speed = xgbe_set_speed;
 
 	hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
-	hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
-	hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
+	hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22;
+	hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22;
+	hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45;
+	hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45;
 
 	hw_if->set_gpio = xgbe_set_gpio;
 	hw_if->clr_gpio = xgbe_clr_gpio;
@@ -3512,13 +3554,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
 	hw_if->read_mmc_stats = xgbe_read_mmc_stats;
 
-	/* For PTP config */
-	hw_if->config_tstamp = xgbe_config_tstamp;
-	hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
-	hw_if->set_tstamp_time = xgbe_set_tstamp_time;
-	hw_if->get_tstamp_time = xgbe_get_tstamp_time;
-	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
-
 	/* For Data Center Bridging config */
 	hw_if->config_tc = xgbe_config_tc;
 	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
@@ -3534,5 +3569,31 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 	hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
 	hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;
 
+	/* For VXLAN */
+	hw_if->enable_vxlan = xgbe_enable_vxlan;
+	hw_if->disable_vxlan = xgbe_disable_vxlan;
+	hw_if->set_vxlan_id = xgbe_set_vxlan_id;
+
+	/* For Split Header */
+	hw_if->enable_sph = xgbe_config_sph_mode;
+	hw_if->disable_sph = xgbe_disable_sph_mode;
+
 	DBGPR("<--xgbe_init_function_ptrs\n");
 }
+
+int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata)
+{
+	/* Enable MAC loopback mode */
+	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 1);
+
+	/* Wait for loopback to stabilize */
+	usleep_range(10, 15);
+
+	return 0;
+}
+
+void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata)
+{
+	/* Disable MAC loopback mode */
+	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 0);
+}
