Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/mt7996/dma.c'):
 drivers/net/wireless/mediatek/mt76/mt7996/dma.c | 535 ++++++++++++++-------
 1 file changed, 403 insertions(+), 132 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index 69a7d9b2e38b..274b273df1ee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -17,12 +17,15 @@ int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
idx -= MT_TXQ_ID(0);
- if (phy->mt76->band_idx == MT_BAND2)
+ if (wed == &dev->mt76.mmio.wed_hif2)
flags = MT_WED_Q_TX(0);
else
flags = MT_WED_Q_TX(idx);
}
+ if (mt76_npu_device_active(&dev->mt76))
+ flags = MT_NPU_Q_TX(phy->mt76->band_idx);
+
return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
ring_base, wed, flags);
}
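
Review note: the NPU check is placed after the WED branch on purpose, so an active NPU overrides whatever flag the WED path picked. A minimal sketch of the resulting precedence (the macro names come from the patch; the stand-alone helper and its bool parameters are illustrative assumptions, not driver code):

	/* Sketch only: TX queue flag precedence implied by the hunk above. */
	static u32 pick_tx_flags(bool wed_active, bool on_hif2, bool npu_active,
				 int idx, int band_idx)
	{
		u32 flags = 0;

		if (wed_active)		/* WED: hif2 always maps to ring 0 */
			flags = on_hif2 ? MT_WED_Q_TX(0) : MT_WED_Q_TX(idx);
		if (npu_active)		/* NPU takes priority over WED */
			flags = MT_NPU_Q_TX(band_idx);

		return flags;
	}
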
@@ -55,58 +58,110 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
/* rx queue */
RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
+ /* for mt7990, RX ring 1 is for SDO instead */
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);
-
- /* mt7996: band0 and band1, mt7992: band0 */
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
- RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);
+ if (mt7996_has_wa(dev))
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN,
+ MT7996_RXQ_MCU_WA_MAIN);
- if (is_mt7996(&dev->mt76)) {
+ switch (mt76_chip(&dev->mt76)) {
+ case MT7992_DEVICE_ID:
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
+ break;
+ case MT7990_DEVICE_ID:
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
+ MT_INT_RX_TXFREE_BAND0_MT7990, MT7990_RXQ_TXFREE0);
+ if (dev->hif2)
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND1, WFDMA0,
+ MT_INT_RX_TXFREE_BAND1_MT7990, MT7990_RXQ_TXFREE1);
+ break;
+ case MT7996_DEVICE_ID:
+ default:
/* mt7996 band2 */
- RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
- } else {
- /* mt7992 band1 */
- RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
- RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
+ break;
}
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
/* band0 */
RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
MT7996_RXQ_RRO_BAND0);
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
- MT7996_RXQ_MSDU_PG_BAND0);
- RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
- MT7996_RXQ_TXFREE0);
- /* band1 */
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
- MT7996_RXQ_MSDU_PG_BAND1);
- /* band2 */
- RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
- MT7996_RXQ_RRO_BAND2);
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
- MT7996_RXQ_MSDU_PG_BAND2);
- RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
- MT7996_RXQ_TXFREE2);
-
- RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
- MT7996_RXQ_RRO_IND);
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND0,
+ MT7996_RXQ_MSDU_PG_BAND0);
+ if (is_mt7996(&dev->mt76)) {
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
+ MT_INT_RX_TXFREE_MAIN, MT7996_RXQ_TXFREE0);
+ /* band1 */
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND1,
+ MT7996_RXQ_MSDU_PG_BAND1);
+ /* band2 */
+ RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0,
+ MT_INT_RX_DONE_RRO_BAND2,
+ MT7996_RXQ_RRO_BAND2);
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND2,
+ MT7996_RXQ_MSDU_PG_BAND2);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0,
+ MT_INT_RX_TXFREE_TRI, MT7996_RXQ_TXFREE2);
+ } else {
+ RXQ_CONFIG(MT_RXQ_RRO_BAND1, WFDMA0,
+ MT_INT_RX_DONE_RRO_BAND1,
+ MT7996_RXQ_RRO_BAND1);
+ }
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0,
+ MT_INT_RX_DONE_RRO_IND,
+ MT7996_RXQ_RRO_IND);
+ else
+ RXQ_CONFIG(MT_RXQ_RRO_RXDMAD_C, WFDMA0,
+ MT_INT_RX_DONE_RRO_RXDMAD_C,
+ MT7996_RXQ_RRO_RXDMAD_C);
}
/* data tx queue */
- TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
if (is_mt7996(&dev->mt76)) {
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
- TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
+ if (dev->hif2) {
+ /* default bn1:ring19 bn2:ring21 */
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2,
+ MT7996_TXQ_BAND2);
+ } else {
+ /* single pcie bn0/1:ring18 bn2:ring19 */
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ }
} else {
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
+ if (dev->hif2) {
+ /* bn0:ring18 bn1:ring21 */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND2,
+ MT7996_TXQ_BAND2);
+ } else {
+ /* single pcie bn0:ring18 bn1:ring19 */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ }
}
/* mcu tx queue */
- MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
- MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
+ if (mt7996_has_wa(dev))
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA,
+ MT7996_TXQ_MCU_WA);
}
static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
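
Review note: every branch of the chip switch above funnels into the same per-queue lookup tables. For reference, the RXQ_CONFIG/TXQ_CONFIG/MCUQ_CONFIG macros in mt7996.h expand to roughly the following (paraphrased from the driver header; treat it as a sketch rather than the authoritative definition):

	/* Paraphrase of Q_CONFIG: record which WFDMA engine, interrupt bit
	 * and hardware ring id belong to logical queue q.
	 */
	#define Q_CONFIG(q, wfdma, int, id) do {			\
			if (wfdma)					\
				dev->q_wfdma_mask |= (1 << (q));	\
			dev->q_int_mask[(q)] = int;			\
			dev->q_id[(q)] = id;				\
		} while (0)

	#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
	#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
	#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
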
@@ -121,43 +176,63 @@ static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
u16 base = 0;
- u8 queue;
+ u8 queue, val;
#define PREFETCH(_depth) (__mt7996_dma_prefetch_base(&base, (_depth)))
/* prefetch SRAM wrapping boundary for tx/rx ring. */
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x2));
+ /* Tx Command Rings */
+ val = is_mt7996(&dev->mt76) ? 2 : 4;
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(val));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(val));
+ if (mt7996_has_wa(dev))
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(val));
+
+ /* Tx Data Rings */
mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
- mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x2));
-
- queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x2));
-
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
+ if (!is_mt7996(&dev->mt76) || dev->hif2)
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
+ if (is_mt7996(&dev->mt76))
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
+
+ /* Rx Event Rings */
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(val));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(val));
+
+ /* Rx TxFreeDone From WA Rings */
+ if (mt7996_has_wa(dev)) {
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(val));
+ queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(val));
+ }
+ /* Rx TxFreeDone From MAC Rings */
+ val = is_mt7996(&dev->mt76) ? 4 : 8;
+ if ((is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev)) ||
+ is_mt7990(&dev->mt76))
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
+ if (is_mt7990(&dev->mt76) && dev->hif2)
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
+ else if (is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev))
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));
+
+ /* Rx Data Rings */
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x10));
-
- if (dev->has_rro) {
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
- PREFETCH(0x10));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
- PREFETCH(0x10));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
- PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
+
+ /* Rx RRO Rings */
+ if (mt7996_has_hwrro(dev)) {
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
+ queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
+
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs, PREFETCH(val));
+ if (is_mt7996(&dev->mt76)) {
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
+ PREFETCH(val));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
+ PREFETCH(val));
+ }
}
#undef PREFETCH
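
Review note: each PREFETCH(n) both returns the ring's SRAM base/depth pair and advances the running base, so the order of the writes above is what carves up the prefetch SRAM — and why the per-chip depth `val` matters for the total budget. The helper (unchanged by this patch, reproduced from the driver for context; verify against the tree) is essentially:

	static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
	{
		/* pack current base and depth for the EXT_CTRL register */
		u32 ret = *base << 16 | depth;

		/* claim depth * 16 SRAM entries for this ring */
		*base = *base + (depth << 4);

		return ret;
	}
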
@@ -255,11 +330,14 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
if (mt7996_band_valid(dev, MT_BAND0))
irq_mask |= MT_INT_BAND0_RX_DONE;
- if (mt7996_band_valid(dev, MT_BAND1))
+ if (mt7996_band_valid(dev, MT_BAND1)) {
irq_mask |= MT_INT_BAND1_RX_DONE;
+ if (is_mt7992(&dev->mt76) && dev->hif2)
+ irq_mask |= MT_INT_RX_TXFREE_BAND1_EXT;
+ }
if (mt7996_band_valid(dev, MT_BAND2))
- irq_mask |= MT_INT_BAND2_RX_DONE;
+ irq_mask |= MT_INT_BAND2_RX_DONE | MT_INT_TX_RX_DONE_EXT;
if (mtk_wed_device_active(wed) && wed_reset) {
u32 wed_irq_mask = irq_mask;
@@ -269,6 +347,9 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
mtk_wed_device_start(wed, wed_irq_mask);
}
+ if (!mt7996_has_wa(dev) || mt76_npu_device_active(&dev->mt76))
+ irq_mask &= ~(MT_INT_RX(MT_RXQ_MAIN_WA) |
+ MT_INT_RX(MT_RXQ_BAND1_WA));
irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
mt7996_irq_enable(dev, irq_mask);
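
Review note: MT_INT_RX() resolves through the per-queue interrupt mask table that RXQ_CONFIG() filled in, which is why masking the WA completion rings is cheap and safe even on chips that never configured them. A sketch of what the final clear amounts to (the macro body is paraphrased; the exact form lives in the driver headers):

	/* Paraphrased: per-queue interrupt bit looked up from dma_config */
	#define MT_INT_RX(q)	(dev->q_int_mask[__RXQ(q)])

	/* WA-less chips left these q_int_mask[] slots at 0, so the clear is
	 * a no-op there; with an active NPU the rings exist but their host
	 * interrupts are deliberately kept masked.
	 */
	irq_mask &= ~(MT_INT_RX(MT_RXQ_MAIN_WA) |
		      MT_INT_RX(MT_RXQ_BAND1_WA));
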
@@ -342,13 +423,48 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
mt76_set(dev, MT_WFDMA_HOST_CONFIG,
- MT_WFDMA_HOST_CONFIG_PDMA_BAND |
- MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
+
+ mt76_clear(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND0_PCIE1 |
+ MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 |
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+
+ if (is_mt7996(&dev->mt76))
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+ else
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND1_PCIE1);
/* AXI read outstanding number */
mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);
+ if (dev->hif2->speed < PCIE_SPEED_5_0GT ||
+ (dev->hif2->speed == PCIE_SPEED_5_0GT &&
+ dev->hif2->width < PCIE_LNK_X2)) {
+ mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ 0x1));
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
+ MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ 0x1));
+ } else if (dev->hif2->speed < PCIE_SPEED_8_0GT ||
+ (dev->hif2->speed == PCIE_SPEED_8_0GT &&
+ dev->hif2->width < PCIE_LNK_X2)) {
+ mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ 0x2));
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
+ MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ 0x2));
+ }
+
/* WFDMA rx threshold */
mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
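
Review note: the new block lowers the AXI read outstanding budget when the second PCIe link is narrow or slow; the two tiers are "below Gen2 x2" and "below Gen3 x2". A self-contained sketch of the decision (enum values from <linux/pci.h>; the helper name and the 0 = "keep default" convention are assumptions for illustration):

	#include <linux/pci.h>

	/* Sketch: outstanding value by negotiated hif2 link, 0 = default */
	static u32 hif2_axi_outstanding(enum pci_bus_speed speed,
					enum pcie_link_width width)
	{
		if (speed < PCIE_SPEED_5_0GT ||
		    (speed == PCIE_SPEED_5_0GT && width < PCIE_LNK_X2))
			return 0x1;	/* slower than Gen2 x2 */

		if (speed < PCIE_SPEED_8_0GT ||
		    (speed == PCIE_SPEED_8_0GT && width < PCIE_LNK_X2))
			return 0x2;	/* slower than Gen3 x2 */

		return 0;		/* fast enough: keep hardware default */
	}
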
@@ -361,27 +477,61 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
* so, redirect pcie0 rx ring3 interrupt to pcie1
*/
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
- dev->has_rro)
+ mt7996_has_hwrro(dev)) {
+ u32 intr = is_mt7996(&dev->mt76) ?
+ MT_WFDMA0_RX_INT_SEL_RING6 :
+ MT_WFDMA0_RX_INT_SEL_RING9 |
+ MT_WFDMA0_RX_INT_SEL_RING5;
+
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
- MT_WFDMA0_RX_INT_SEL_RING6);
- else
+ intr);
+ } else {
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
MT_WFDMA0_RX_INT_SEL_RING3);
+ }
}
mt7996_dma_start(dev, reset, true);
}
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u32 irq_mask;
int ret;
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ /* rxdmad_c */
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags = MT_WED_RRO_Q_RXDMAD_C;
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].wed = &mdev->mmio.wed;
+ else if (!mt76_npu_device_active(&dev->mt76))
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags |= MT_QFLAG_EMI_EN;
+ ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
+ MT_RXQ_ID(MT_RXQ_RRO_RXDMAD_C),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RRO_AP_RING_BASE);
+ if (ret)
+ return ret;
+
+ if (!mtk_wed_device_active(&mdev->mmio.wed)) {
+ /* We need to set cpu idx pointer before resetting the
+ * EMI queues.
+ */
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].emi_cpu_idx =
+ &dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
+ mt76_queue_reset(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
+ true);
+ }
+ goto start_hw_rro;
+ }
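
Review note: in the non-WED, non-NPU case the rxdmad_c ring runs in EMI mode, where the hardware fetches the ring's CPU index from host memory rather than a doorbell register; that is why emi_cpu_idx must point at the shared structure before the reset publishes the ring state. Conceptually (field names follow the patch; the ordering constraint is the point):

	struct mt76_queue *q = &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C];

	q->flags |= MT_QFLAG_EMI_EN;	/* device polls a memory word */
	q->emi_cpu_idx = &dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
	mt76_queue_reset(dev, q, true);	/* safe only after the line above */
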
+
/* ind cmd */
mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
- mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
MT_RXQ_ID(MT_RXQ_RRO_IND),
MT7996_RX_RING_SIZE,
@@ -392,7 +542,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
/* rx msdu page queue for band0 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
MT7996_RX_RING_SIZE,
@@ -401,11 +553,13 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
if (ret)
return ret;
- if (mt7996_band_valid(dev, MT_BAND1)) {
+ if (mt7996_band_valid(dev, MT_BAND1) && is_mt7996(&dev->mt76)) {
/* rx msdu page queue for band1 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
MT7996_RX_RING_SIZE,
@@ -419,7 +573,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
/* rx msdu page queue for band2 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
MT7996_RX_RING_SIZE,
@@ -429,15 +585,44 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
return ret;
}
- irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
- MT_INT_TX_DONE_BAND2;
- mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
- mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
- mt7996_irq_enable(dev, irq_mask);
+start_hw_rro:
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ irq_mask = mdev->mmio.irqmask |
+ MT_INT_TX_DONE_BAND2;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
+ mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
+ mt7996_irq_enable(dev, irq_mask);
+ } else {
+ if (is_mt7996(&dev->mt76)) {
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2,
+ mt76_dma_rx_poll);
+ } else {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1,
+ mt76_dma_rx_poll);
+ }
+
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll);
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_RXDMAD_C,
+ mt76_dma_rx_poll);
+ } else {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_IND,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0,
+ mt76_dma_rx_poll);
+ }
+
+ if (!mt76_npu_device_active(&dev->mt76))
+ mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE);
+ }
return 0;
}
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
int mt7996_dma_init(struct mt7996_dev *dev)
{
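
Review note: dropping the CONFIG_NET_MEDIATEK_SOC_WED guard around mt7996_dma_rro_init() is what lets the EMI and NPU flavours of hardware RRO reach it. It still builds with WED disabled because the WED helpers degrade to constant-false stubs, so the WED-only branches fold away. Paraphrased from include/linux/soc/mediatek/mtk_wed.h (not verbatim):

	#ifndef CONFIG_NET_MEDIATEK_SOC_WED
	static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
	{
		return false;	/* compiler discards WED-only paths */
	}
	#endif
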
@@ -474,12 +659,14 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* command to WA */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
- MT_MCUQ_ID(MT_MCUQ_WA),
- MT7996_TX_MCU_RING_SIZE,
- MT_MCUQ_RING_BASE(MT_MCUQ_WA));
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
+ MT_MCUQ_ID(MT_MCUQ_WA),
+ MT7996_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WA));
+ if (ret)
+ return ret;
+ }
/* firmware download */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
@@ -493,16 +680,16 @@ int mt7996_dma_init(struct mt7996_dev *dev)
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
MT_RXQ_ID(MT_RXQ_MCU),
MT7996_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
+ MT7996_RX_MCU_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU));
if (ret)
return ret;
- /* event from WA */
+ /* event from WA, or SDO event for mt7990 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
MT_RXQ_ID(MT_RXQ_MCU_WA),
MT7996_RX_MCU_RING_SIZE_WA,
- MT_RX_BUF_SIZE,
+ MT7996_RX_MCU_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
if (ret)
return ret;
@@ -522,18 +709,48 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* tx free notify event from WA for band0 */
- if (mtk_wed_device_active(wed) && !dev->has_rro) {
+ if (mtk_wed_device_active(wed) &&
+ ((is_mt7996(&dev->mt76) && !mt7996_has_hwrro(dev)) ||
+ (is_mt7992(&dev->mt76)))) {
dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
}
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
- MT_RXQ_ID(MT_RXQ_MAIN_WA),
- MT7996_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+ MT_RXQ_ID(MT_RXQ_MAIN_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
+ if (ret)
+ return ret;
+ } else {
+ if (mtk_wed_device_active(wed)) {
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ }
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+ if (ret)
+ return ret;
+ }
+
+ if (!mt7996_has_wa(dev) && dev->hif2) {
+ if (mtk_wed_device_active(wed)) {
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].wed = wed;
+ }
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND1),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND1));
+ if (ret)
+ return ret;
+ }
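
Review note: the two TXFREE ring allocations above follow the pattern this patch uses throughout — optionally hand the ring to WED, then allocate it as a normal host ring either way. Condensed (setup_txfree_ring is a hypothetical name, not a driver function; the mt76_queue_alloc call shape mirrors the surrounding code):

	static int setup_txfree_ring(struct mt7996_dev *dev, int qid, u32 base)
	{
		struct mt76_queue *q = &dev->mt76.q_rx[qid];

		if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
			q->flags = MT_WED_Q_TXFREE;	/* offload owns the ring */
			q->wed = &dev->mt76.mmio.wed;
		}

		return mt76_queue_alloc(dev, q, MT_RXQ_ID(qid),
					MT7996_RX_MCU_RING_SIZE,
					MT7996_RX_BUF_SIZE, base);
	}
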
if (mt7996_band_valid(dev, MT_BAND2)) {
/* rx data queue for mt7996 band2 */
@@ -549,7 +766,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
/* tx free notify event from WA for mt7996 band2
* use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
*/
- if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
+ if (mtk_wed_device_active(wed_hif2) && !mt7996_has_hwrro(dev)) {
dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
}
@@ -564,6 +781,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
} else if (mt7996_band_valid(dev, MT_BAND1)) {
/* rx data queue for mt7992 band1 */
rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1].flags = MT_WED_Q_RX(1);
+ dev->mt76.q_rx[MT_RXQ_BAND1].wed = wed;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
MT_RXQ_ID(MT_RXQ_BAND1),
MT7996_RX_RING_SIZE,
@@ -573,22 +795,30 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* tx free notify event from WA for mt7992 band1 */
- rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
- MT_RXQ_ID(MT_RXQ_BAND1_WA),
- MT7996_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
- rx_base);
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
+ if (mtk_wed_device_active(wed_hif2)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1_WA].flags =
+ MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_BAND1_WA].wed = wed_hif2;
+ }
+
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
+ MT_RXQ_ID(MT_RXQ_BAND1_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ rx_base);
+ if (ret)
+ return ret;
+ }
}
- if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
- dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
/* rx rro data queue for band0 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
- dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
MT_RXQ_ID(MT_RXQ_RRO_BAND0),
MT7996_RX_RING_SIZE,
@@ -597,23 +827,44 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret)
return ret;
- /* tx free notify event from WA for band0 */
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ if (is_mt7992(&dev->mt76)) {
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].flags =
+ MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
+ if (mtk_wed_device_active(wed) &&
+ mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
+ ret = mt76_queue_alloc(dev,
+ &dev->mt76.q_rx[MT_RXQ_RRO_BAND1],
+ MT_RXQ_ID(MT_RXQ_RRO_BAND1),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND1) + hif1_ofs);
+ if (ret)
+ return ret;
+ } else {
+ if (mtk_wed_device_active(wed)) {
+ /* tx free notify event from WA for band0 */
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ }
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
- MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
- MT7996_RX_MCU_RING_SIZE,
- MT7996_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
- if (ret)
- return ret;
+ ret = mt76_queue_alloc(dev,
+ &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+ if (ret)
+ return ret;
+ }
if (mt7996_band_valid(dev, MT_BAND2)) {
/* rx rro data queue for band2 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
- dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
+ if (mtk_wed_device_active(wed) &&
+ mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
MT_RXQ_ID(MT_RXQ_RRO_BAND2),
MT7996_RX_RING_SIZE,
@@ -641,6 +892,10 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret < 0)
return ret;
+ ret = mt7996_npu_rx_queues_init(dev);
+ if (ret)
+ return ret;
+
netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7996_poll_tx);
napi_enable(&dev->mt76.tx_napi);
@@ -684,6 +939,10 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
mt76_tx_status_check(&dev->mt76, true);
+ if (mt7996_has_hwrro(dev) &&
+ !mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt7996_rro_msdu_page_map_free(dev);
+
/* reset wfsys */
if (force)
mt7996_wfsys_reset(dev);
@@ -694,6 +953,7 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
if (mtk_wed_device_active(&dev->mt76.mmio.wed))
mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
+ mt76_npu_disable_irqs(&dev->mt76);
mt7996_dma_disable(dev, force);
mt76_wed_dma_reset(&dev->mt76);
@@ -707,21 +967,32 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
- if (mtk_wed_device_active(&dev->mt76.mmio.wed))
- if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
- mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
- continue;
+ struct mt76_queue *q = &dev->mt76.q_rx[i];
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ if (mt76_queue_is_wed_rro(q) ||
+ mt76_queue_is_wed_tx_free(q)) {
+ if (force && mt76_queue_is_wed_rro_data(q))
+ mt76_queue_reset(dev, q, false);
+ continue;
+ }
+ }
+ mt76_queue_reset(dev, q, true);
}
mt76_tx_status_check(&dev->mt76, true);
- mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && force &&
+ (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+ mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i])))
+ continue;
+
mt76_queue_rx_reset(dev, i);
+ }
mt7996_dma_enable(dev, !force);
}
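
Review note: mt76_queue_reset() grew a third argument in this series, visible at every call site in this hunk. Judging by those call sites, it selects whether the ring indices are reprogrammed, so a forced reset can clear the descriptors of a WED-owned RRO data ring without touching state the offload still relies on. Roughly (inferred from usage, not the verbatim mt76 prototype):

	void mt76_queue_reset(struct mt7996_dev *dev, struct mt76_queue *q,
			      bool reset_idx);

	mt76_queue_reset(dev, q, true);		/* full reset, indices included */
	mt76_queue_reset(dev, q, false);	/* keep indices: WED RRO data ring */
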