author	Sujuan Chen <sujuan.chen@mediatek.com>	2023-09-18 12:29:16 +0200
committer	Paolo Abeni <pabeni@redhat.com>	2023-09-19 18:27:56 +0200
commit	6757d345dd7dba795f5af44d4442d55a83c4b1b4 (patch)
tree	2bd3a47391861b3ddc7a5791971507b7300aceed /drivers/net/ethernet/mediatek
parent	b230812b9dda125e69ab0a5a11cda88d9c0d18a9 (diff)
net: ethernet: mtk_wed: introduce hw_rro support for MT7988
The MT7988 SoC supports 802.11 receive reordering offload in hw, while the MT7986 SoC implements it through the firmware running on the MCU.

Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
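[Editor's note, not part of the patch] The hooks exported at the bottom of this diff (.rro_rx_ring_setup, .msdu_pg_rx_ring_setup, .ind_rx_ring_setup, .start_hw_rro) are intended to be driven from the WLAN driver side. The sketch below only illustrates a plausible call order; the function name, register pointers, irq mask, and the "after firmware download" placement are assumptions, and a real driver would normally go through the mtk_wed_device_*() wrappers rather than dereferencing ->ops directly. Only the ops members, the dev->wlan.hw_rro flag, and the MTK_WED_RX_QUEUES / MTK_WED_RX_PAGE_QUEUES ring counts come from the patch itself.

#include <linux/soc/mediatek/mtk_wed.h>

/* Illustrative sketch only -- not part of this patch. Shows one possible
 * order in which a hypothetical MT7988 wlan driver could invoke the
 * hw_rro hooks registered in this patch; parameter names are placeholders.
 */
static void wlan_hw_rro_setup_sketch(struct mtk_wed_device *wed,
				     void __iomem *rro_rx_regs[MTK_WED_RX_QUEUES],
				     void __iomem *pg_regs[MTK_WED_RX_PAGE_QUEUES],
				     void __iomem *ind_cmd_regs, u32 irq_mask)
{
	int i;

	if (!wed->wlan.hw_rro)
		return;

	/* hand the RRO RX data rings over to WED */
	for (i = 0; i < MTK_WED_RX_QUEUES; i++)
		wed->ops->rro_rx_ring_setup(wed, i, rro_rx_regs[i]);

	/* MSDU page rings feeding the hw reorder engine */
	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
		wed->ops->msdu_pg_rx_ring_setup(wed, i, pg_regs[i]);

	/* IND_CMD ring plus BA session / PN-check table setup */
	wed->ops->ind_rx_ring_setup(wed, ind_cmd_regs);

	/* unmask the RRO interrupts and check that the rings are filled */
	wed->ops->start_hw_rro(wed, irq_mask, false);
}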
Diffstat (limited to 'drivers/net/ethernet/mediatek')
-rw-r--r--	drivers/net/ethernet/mediatek/mtk_wed.c	299
1 file changed, 297 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index d4b41ccfbad5..2a0be1f2d43e 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -27,7 +27,7 @@
#define MTK_WED_BUF_SIZE 2048
#define MTK_WED_PAGE_BUF_SIZE 128
#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
-#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
+#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
#define MTK_WED_RX_RING_SIZE 1536
#define MTK_WED_RX_PG_BM_CNT 8192
#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
@@ -597,6 +597,68 @@ free_pagelist:
}
static int
+mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
+{
+ int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
+ struct mtk_wed_buf *page_list;
+ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ int i, page_idx = 0;
+
+ if (!dev->wlan.hw_rro)
+ return 0;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+ dev->hw_rro.pages = page_list;
+ desc = dma_alloc_coherent(dev->hw->dev,
+ dev->wlan.rx_nbuf * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->hw_rro.desc = desc;
+ dev->hw_rro.desc_phys = desc_phys;
+
+ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ int s;
+
+ page = __dev_alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ page_list[page_idx].p = page;
+ page_list[page_idx++].phy_addr = page_phys;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ buf_phys = page_phys;
+ for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
+ desc->buf0 = cpu_to_le32(buf_phys);
+ buf_phys += MTK_WED_PAGE_BUF_SIZE;
+ desc++;
+ }
+
+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
+
+static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_wed_bm_desc *desc;
@@ -613,7 +675,42 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
dev->rx_buf_ring.desc_phys = desc_phys;
dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
- return 0;
+ return mtk_wed_hwrro_buffer_alloc(dev);
+}
+
+static void
+mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_buf *page_list = dev->hw_rro.pages;
+ struct mtk_wed_bm_desc *desc = dev->hw_rro.desc;
+ int i, page_idx = 0;
+
+ if (!dev->wlan.hw_rro)
+ return;
+
+ if (!page_list)
+ return;
+
+ if (!desc)
+ goto free_pagelist;
+
+ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
+ dma_addr_t buf_addr = page_list[page_idx].phy_addr;
+ void *page = page_list[page_idx++].p;
+
+ if (!page)
+ break;
+
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+ dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc),
+ desc, dev->hw_rro.desc_phys);
+
+free_pagelist:
+ kfree(page_list);
}
static void
@@ -627,6 +724,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
dev->wlan.release_rx_buf(dev);
dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
desc, dev->rx_buf_ring.desc_phys);
+
+ mtk_wed_hwrro_free_buffer(dev);
+}
+
+static void
+mtk_wed_hwrro_init(struct mtk_wed_device *dev)
+{
+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+ return;
+
+ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
+ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
+
+ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys);
+
+ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
+ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
+ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
+ MTK_WED_RX_PG_BM_CNT));
+
+ /* enable rx_page_bm to fetch dmad */
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
}
static void
@@ -640,6 +759,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+
+ mtk_wed_hwrro_init(dev);
}
static void
@@ -935,6 +1056,8 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
+ int i;
+
if (mtk_wed_is_v1(dev->hw)) {
wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
return;
@@ -952,6 +1075,15 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+
+ if (!dev->wlan.hw_rro)
+ return;
+
+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
+ dev->wlan.wpdma_rx_pg + i * 0x10);
}
static void
@@ -1763,6 +1895,165 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
}
static void
+mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
+{
+ int i;
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+
+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+ return;
+
+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_CLR);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
+ dev->wlan.rro_rx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
+ dev->wlan.rro_rx_tbit[1]));
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[1]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[2]));
+
+ /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
+ * WM FWDL has completed, otherwise the RRO_MSDU_PG ring may be broken
+ */
+ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_EN);
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];
+
+ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue;
+
+ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
+ dev_err(dev->hw->dev,
+ "rx_rro_ring(%d) initialization failed\n", i);
+ }
+
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
+ struct mtk_wed_ring *ring = &dev->rx_page_ring[i];
+
+ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue;
+
+ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
+ dev_err(dev->hw->dev,
+ "rx_page_ring(%d) initialization failed\n", i);
+ }
+}
+
+static void
+mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx,
+ void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
+
+ ring->wpdma = regs;
+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
+ readl(regs));
+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+}
+
+static void
+mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
+
+ ring->wpdma = regs;
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
+ readl(regs));
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+}
+
+static int
+mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
+ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
+ int i, count = 0;
+
+ ring->wpdma = regs;
+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
+ readl(regs) & 0xfffffff0);
+
+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+
+ /* ack sn cr */
+ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
+ dev->wlan.ind_cmd.ack_sn_addr);
+ wed_w32(dev, MTK_WED_RRO_CFG1,
+ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
+ dev->wlan.ind_cmd.win_size) |
+ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
+ dev->wlan.ind_cmd.particular_sid));
+
+ /* particular session addr element */
+ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0,
+ dev->wlan.ind_cmd.particular_se_phys);
+
+ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
+ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
+ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
+ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
+ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
+
+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100)
+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+ if (count >= 100)
+ dev_err(dev->hw->dev,
+ "write ba session base failed\n");
+ }
+
+ /* pn check init */
+ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
+ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
+ MTK_WED_PN_CHECK_IS_FIRST);
+
+ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
+ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
+
+ count = 0;
+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+ while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100)
+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+ if (count >= 100)
+ dev_err(dev->hw->dev,
+ "session(%d) initialization failed\n", i);
+ }
+
+ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
+
+ return 0;
+}
+
+static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
int i;
@@ -2216,6 +2507,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
.detach = mtk_wed_detach,
.ppe_check = mtk_wed_ppe_check,
.setup_tc = mtk_wed_setup_tc,
+ .start_hw_rro = mtk_wed_start_hw_rro,
+ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
+ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
+ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
};
struct device_node *eth_np = eth->dev->of_node;
struct platform_device *pdev;