Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c')
| -rw-r--r-- | drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 252 |
1 file changed, 141 insertions(+), 111 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d99fa028c646..7b513324cfb0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -13,11 +13,11 @@
 #include <linux/io.h>
 #include "dwmac4.h"
 #include "dwmac4_dma.h"
+#include "stmmac.h"
 
 static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
 {
 	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
-	int i;
 
 	pr_info("dwmac4: Master AXI performs %s burst length\n",
 		(value & DMA_SYS_BUS_FB) ? "fixed" : "any");
@@ -37,112 +37,99 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
 
 	/* Depending on the UNDEF bit the Master AXI will perform any burst
 	 * length according to the BLEN programmed (by default all BLEN are
-	 * set).
+	 * set). Note that the UNDEF bit is readonly, and is the inverse of
+	 * Bus Mode bit 16.
 	 */
-	for (i = 0; i < AXI_BLEN; i++) {
-		switch (axi->axi_blen[i]) {
-		case 256:
-			value |= DMA_AXI_BLEN256;
-			break;
-		case 128:
-			value |= DMA_AXI_BLEN128;
-			break;
-		case 64:
-			value |= DMA_AXI_BLEN64;
-			break;
-		case 32:
-			value |= DMA_AXI_BLEN32;
-			break;
-		case 16:
-			value |= DMA_AXI_BLEN16;
-			break;
-		case 8:
-			value |= DMA_AXI_BLEN8;
-			break;
-		case 4:
-			value |= DMA_AXI_BLEN4;
-			break;
-		}
-	}
+	value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
 
 	writel(value, ioaddr + DMA_SYS_BUS_MODE);
 }
 
-static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
+static void dwmac4_dma_init_rx_chan(struct stmmac_priv *priv,
+				    void __iomem *ioaddr,
 				    struct stmmac_dma_cfg *dma_cfg,
 				    dma_addr_t dma_rx_phy, u32 chan)
 {
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
 	u32 value;
 	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
 
-	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
 	value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
-	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
 
 	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
 		writel(upper_32_bits(dma_rx_phy),
-		       ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(chan));
+		       ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, chan));
 
-	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
+	writel(lower_32_bits(dma_rx_phy),
+	       ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, chan));
 }
 
-static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
+static void dwmac4_dma_init_tx_chan(struct stmmac_priv *priv,
+				    void __iomem *ioaddr,
 				    struct stmmac_dma_cfg *dma_cfg,
 				    dma_addr_t dma_tx_phy, u32 chan)
 {
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
 	u32 value;
 	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
 
-	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
 	value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
 
 	/* Enable OSP to get best performance */
 	value |= DMA_CONTROL_OSP;
 
-	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
 
 	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
 		writel(upper_32_bits(dma_tx_phy),
-		       ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(chan));
+		       ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, chan));
 
-	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
+	writel(lower_32_bits(dma_tx_phy),
+	       ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, chan));
 }
 
-static void dwmac4_dma_init_channel(void __iomem *ioaddr,
+static void dwmac4_dma_init_channel(struct stmmac_priv *priv,
+				    void __iomem *ioaddr,
 				    struct stmmac_dma_cfg *dma_cfg, u32 chan)
 {
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
 	u32 value;
 
 	/* common channel control register config */
-	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+	value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
 	if (dma_cfg->pblx8)
 		value = value | DMA_BUS_MODE_PBL;
-	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+	writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
 
 	/* Mask interrupts by writing to CSR7 */
 	writel(DMA_CHAN_INTR_DEFAULT_MASK,
-	       ioaddr + DMA_CHAN_INTR_ENA(chan));
+	       ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
 }
 
-static void dwmac410_dma_init_channel(void __iomem *ioaddr,
+static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
+				      void __iomem *ioaddr,
 				      struct stmmac_dma_cfg *dma_cfg, u32 chan)
 {
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
 	u32 value;
 
 	/* common channel control register config */
-	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+	value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
 	if (dma_cfg->pblx8)
 		value = value | DMA_BUS_MODE_PBL;
-	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+	writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
 
 	/* Mask interrupts by writing to CSR7 */
 	writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
-	       ioaddr + DMA_CHAN_INTR_ENA(chan));
+	       ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
 }
 
 static void dwmac4_dma_init(void __iomem *ioaddr,
-			    struct stmmac_dma_cfg *dma_cfg, int atds)
+			    struct stmmac_dma_cfg *dma_cfg)
 {
 	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
 
@@ -176,65 +163,88 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
 }
 
-static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
+static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
+				  void __iomem *ioaddr, u32 channel,
 				  u32 *reg_space)
 {
-	reg_space[DMA_CHAN_CONTROL(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_CONTROL(channel));
-	reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
-	reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
-	reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
-	reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
-	reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel));
-	reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel));
-	reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel));
-	reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel));
-	reg_space[DMA_CHAN_INTR_ENA(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_INTR_ENA(channel));
-	reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel));
-	reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel));
-	reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel));
-	reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel));
-	reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel));
-	reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel));
-	reg_space[DMA_CHAN_STATUS(channel) / 4] =
-		readl(ioaddr + DMA_CHAN_STATUS(channel));
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+	const struct dwmac4_addrs *default_addrs = NULL;
+
+	/* Purposely save the registers in the "normal" layout, regardless of
+	 * platform modifications, to keep reg_space size constant
+	 */
+	reg_space[DMA_CHAN_CONTROL(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_TX_CONTROL(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_RX_CONTROL(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_TX_BASE_ADDR_HI(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_TX_BASE_ADDR(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_RX_BASE_ADDR_HI(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_RX_BASE_ADDR(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_TX_END_ADDR(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_TX_END_ADDR(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_RX_END_ADDR(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_END_ADDR(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_TX_RING_LEN(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_TX_RING_LEN(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_RX_RING_LEN(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_RING_LEN(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_INTR_ENA(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_RX_WATCHDOG(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_WATCHDOG(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_SLOT_CTRL_STATUS(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_CUR_TX_DESC(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CUR_TX_DESC(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_CUR_RX_DESC(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CUR_RX_DESC(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_CUR_TX_BUF_ADDR_HI(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR_HI(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_CUR_RX_BUF_ADDR_HI(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR_HI(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(dwmac4_addrs, channel));
+	reg_space[DMA_CHAN_STATUS(default_addrs, channel) / 4] =
+		readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, channel));
 }
 
-static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
+static void dwmac4_dump_dma_regs(struct stmmac_priv *priv, void __iomem *ioaddr,
+				 u32 *reg_space)
 {
 	int i;
 
 	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
-		_dwmac4_dump_dma_regs(ioaddr, i, reg_space);
+		_dwmac4_dump_dma_regs(priv, ioaddr, i, reg_space);
 }
 
-static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
+static void dwmac4_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
+			       u32 riwt, u32 queue)
 {
-	writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+
+	writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(dwmac4_addrs, queue));
 }
 
-static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
+				       void __iomem *ioaddr, int mode,
 				       u32 channel, int fifosz, u8 qmode)
 {
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
 	unsigned int rqs = fifosz / 256 - 1;
 	u32 mtl_rx_op;
 
-	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
+
+	mtl_rx_op |= MTL_OP_MODE_DIS_TCP_EF;
 
 	if (mode == SF_DMA_MODE) {
 		pr_debug("GMAC: enable RX store and forward mode\n");
@@ -242,7 +252,7 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
 	} else {
 		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
 		mtl_rx_op &= ~MTL_OP_MODE_RSF;
-		mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+		mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK;
 		if (mode <= 32)
 			mtl_rx_op |= MTL_OP_MODE_RTC_32;
 		else if (mode <= 64)
@@ -292,13 +302,16 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
 		mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
 	}
 
-	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
 }
 
-static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
+				       void __iomem *ioaddr, int mode,
 				       u32 channel, int fifosz, u8 qmode)
 {
-	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs,
+							   channel));
 	unsigned int tqs = fifosz / 256 - 1;
 
 	if (mode == SF_DMA_MODE) {
@@ -308,7 +321,7 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
 	} else {
 		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
 		mtl_tx_op &= ~MTL_OP_MODE_TSF;
-		mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
+		mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK;
 		/* Set the transmit threshold */
 		if (mode <= 32)
 			mtl_tx_op |= MTL_OP_MODE_TTC_32;
@@ -344,7 +357,7 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
 	mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
 	mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
 
-	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
 }
 
 static int dwmac4_get_hw_feature(void __iomem *ioaddr,
@@ -442,26 +455,31 @@ static int dwmac4_get_hw_feature(void __iomem *ioaddr,
 }
 
 /* Enable/disable TSO feature and set MSS */
-static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+static void dwmac4_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
+			      bool en, u32 chan)
 {
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
 	u32 value;
 
 	if (en) {
 		/* enable TSO */
-		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
 		writel(value | DMA_CONTROL_TSE,
-		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
+		       ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
 	} else {
 		/* enable TSO */
-		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
 		writel(value & ~DMA_CONTROL_TSE,
-		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
+		       ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
 	}
 }
 
-static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+static void dwmac4_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
+			 u32 channel, u8 qmode)
 {
-	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs,
+							   channel));
 
 	mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
 	if (qmode != MTL_QUEUE_AVB)
@@ -469,47 +487,59 @@ static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
 	else
 		mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
 
-	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
 }
 
-static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+static void dwmac4_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
+			      int bfsize, u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
 
 	value &= ~DMA_RBSZ_MASK;
 	value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
 
-	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
 }
 
-static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
+			      bool en, u32 chan)
 {
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
 	u32 value = readl(ioaddr + GMAC_EXT_CONFIG);
 
 	value &= ~GMAC_CONFIG_HDSMS;
 	value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
 	writel(value, ioaddr + GMAC_EXT_CONFIG);
 
-	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+	value = readl(ioaddr + GMAC_EXT_CFG1);
+	value |= GMAC_CONFIG1_SPLM(1); /* Split mode set to L2OFST */
+	value |= GMAC_CONFIG1_SAVE_EN; /* Enable Split AV mode */
+	writel(value, ioaddr + GMAC_EXT_CFG1);
+
+	value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
 	if (en)
 		value |= DMA_CONTROL_SPH;
 	else
 		value &= ~DMA_CONTROL_SPH;
-	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+	writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
 }
 
-static int dwmac4_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+static int dwmac4_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
+			     bool en, u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
 
 	if (en)
 		value |= DMA_CONTROL_EDSE;
 	else
 		value &= ~DMA_CONTROL_EDSE;
 
-	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
 
-	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)) & DMA_CONTROL_EDSE;
+	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs,
+						   chan)) & DMA_CONTROL_EDSE;
 	if (en && !value)
 		return -EIO;
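The recurring change in this diff is that every DMA/MTL channel register macro gains a dwmac4_addrs argument taken from priv->plat->dwmac4_addrs, so a platform can relocate the channel register block while a NULL pointer keeps the default dwmac4 layout (which is also why the register dump indexes reg_space with default_addrs = NULL). Below is a rough, standalone sketch of that addressing idea; the struct fields, base address and stride are illustrative assumptions, not values copied from dwmac4_dma.h.

/* Illustrative sketch only: layout, base and stride below are assumptions,
 * not the definitions from the real dwmac4 headers.
 */
#include <stdint.h>
#include <stdio.h>

struct dwmac4_addrs {
	uint32_t dma_chan;		/* base of DMA channel 0 registers */
	uint32_t dma_chan_offset;	/* stride between channel blocks */
};

#define DEFAULT_DMA_CHAN_BASE	0x00001100u	/* assumed default layout */
#define DEFAULT_DMA_CHAN_STRIDE	0x00000080u

/* A NULL addrs pointer means "use the default layout", as the register
 * dump in the hunk above does on purpose.
 */
static uint32_t dma_chanx_base_addr(const struct dwmac4_addrs *addrs, uint32_t chan)
{
	if (addrs)
		return addrs->dma_chan + chan * addrs->dma_chan_offset;
	return DEFAULT_DMA_CHAN_BASE + chan * DEFAULT_DMA_CHAN_STRIDE;
}

int main(void)
{
	const struct dwmac4_addrs quirky = {
		.dma_chan = 0x3000,		/* hypothetical platform override */
		.dma_chan_offset = 0x200,
	};

	/* The same channel index resolves to different offsets per platform. */
	printf("default chan1: 0x%04x\n", dma_chanx_base_addr(NULL, 1));
	printf("quirky  chan1: 0x%04x\n", dma_chanx_base_addr(&quirky, 1));
	return 0;
}

Individual register macros such as DMA_CHAN_RX_CONTROL() then add a fixed register offset on top of this per-channel base, which is why only the two-argument form changes throughout the hunks above.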

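The dwmac4_dma_axi() hunk also drops the per-call switch over axi->axi_blen[] in favour of a precomputed axi_blen_regval that is masked into DMA_SYS_BUS_MODE. The sketch below shows one way such a value could be computed once, for example when the platform data is parsed; the AXI_BLEN count and the bit positions are assumptions for illustration, not the definitive DMA_AXI_BLEN* definitions.

/* Sketch only: AXI_BLEN and the bit positions are assumed for illustration. */
#include <stdint.h>
#include <stdio.h>

#define AXI_BLEN	7	/* number of configurable burst-length slots */

static uint32_t axi_blen_to_regval(const uint32_t blen[AXI_BLEN])
{
	uint32_t regval = 0;

	for (int i = 0; i < AXI_BLEN; i++) {
		switch (blen[i]) {
		case 4:   regval |= 1u << 1; break;	/* BLEN4 (assumed bit) */
		case 8:   regval |= 1u << 2; break;	/* BLEN8 */
		case 16:  regval |= 1u << 3; break;	/* BLEN16 */
		case 32:  regval |= 1u << 4; break;	/* BLEN32 */
		case 64:  regval |= 1u << 5; break;	/* BLEN64 */
		case 128: regval |= 1u << 6; break;	/* BLEN128 */
		case 256: regval |= 1u << 7; break;	/* BLEN256 */
		default:  break;			/* empty slot */
		}
	}
	return regval;
}

int main(void)
{
	const uint32_t blen[AXI_BLEN] = { 4, 8, 16, 0, 0, 0, 0 };

	/* dwmac4_dma_axi() then only needs a read-modify-write of the register */
	printf("axi_blen_regval = 0x%02x\n", axi_blen_to_regval(blen));
	return 0;
}

Translating the burst-length list once keeps the fast path to a single read-modify-write of DMA_SYS_BUS_MODE and removes the loop variable that the first hunk deletes.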