Diffstat (limited to 'drivers/net/wireless/realtek/rtw88/mac.c')
-rw-r--r--	drivers/net/wireless/realtek/rtw88/mac.c	88
1 file changed, 88 insertions, 0 deletions
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index d8c5da342b11..f40877bc9c9a 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -719,6 +719,93 @@ dlfw_fail:
 	return ret;
 }
 
+static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
+{
+	struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
+	u32 prio_queues = 0;
+
+	if (queues & BIT(IEEE80211_AC_VO))
+		prio_queues |= BIT(rqpn->dma_map_vo);
+	if (queues & BIT(IEEE80211_AC_VI))
+		prio_queues |= BIT(rqpn->dma_map_vi);
+	if (queues & BIT(IEEE80211_AC_BE))
+		prio_queues |= BIT(rqpn->dma_map_be);
+	if (queues & BIT(IEEE80211_AC_BK))
+		prio_queues |= BIT(rqpn->dma_map_bk);
+
+	return prio_queues;
+}
+
+static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
+				       u32 prio_queue, bool drop)
+{
+	u32 addr;
+	u16 avail_page, rsvd_page;
+	int i;
+
+	switch (prio_queue) {
+	case RTW_DMA_MAPPING_EXTRA:
+		addr = REG_FIFOPAGE_INFO_4;
+		break;
+	case RTW_DMA_MAPPING_LOW:
+		addr = REG_FIFOPAGE_INFO_2;
+		break;
+	case RTW_DMA_MAPPING_NORMAL:
+		addr = REG_FIFOPAGE_INFO_3;
+		break;
+	case RTW_DMA_MAPPING_HIGH:
+		addr = REG_FIFOPAGE_INFO_1;
+		break;
+	default:
+		return;
+	}
+
+	/* poll for up to 100ms (5 * 20ms) until all of the reserved
+	 * pages are available again, i.e. the queue has drained
+	 */
+	for (i = 0; i < 5; i++) {
+		rsvd_page = rtw_read16(rtwdev, addr);
+		avail_page = rtw_read16(rtwdev, addr + 2);
+		if (rsvd_page == avail_page)
+			return;
+
+		msleep(20);
+	}
+
+	/* The priority queue is still not empty, so emit a warning.
+	 *
+	 * Note that flushing the tx queues under heavy traffic
+	 * (e.g. 100 Mbps uplink) can drop some of the packets, and
+	 * flushing a full priority queue can take roughly 2 seconds.
+	 */
+	if (!drop)
+		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
+}
+
+static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
+				      u32 prio_queues, bool drop)
+{
+	u32 q;
+
+	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
+		if (prio_queues & BIT(q))
+			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
+}
+
+void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
+{
+	u32 prio_queues = 0;
+
+	/* If all of the hardware queues are requested to be flushed,
+	 * or if the priority queue mapping has not been set up yet,
+	 * flush all of the priority queues.
+	 */
+	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
+		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
+	else
+		prio_queues = get_priority_queues(rtwdev, queues);
+
+	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
+}
+
 static int txdma_queue_mapping(struct rtw_dev *rtwdev)
 {
 	struct rtw_chip_info *chip = rtwdev->chip;
@@ -743,6 +830,7 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
 		return -EINVAL;
 	}
 
+	rtwdev->fifo.rqpn = rqpn;
 	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
 	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
 	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
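
The new rtw_mac_flush_queues() maps mac80211's per-AC queue bitmap onto the chip's DMA priority queues via the rqpn table saved in txdma_queue_mapping(), then polls each queue's FIFOPAGE counters until it drains. A minimal sketch of how a driver might wire this into mac80211's .flush callback follows; the rtw_ops_flush name, the callback signature, and the use of rtwdev->mutex are assumptions for illustration and are not part of this patch.

/* Hypothetical caller of rtw_mac_flush_queues(), hooking it into
 * mac80211's .flush op. Only rtw_mac_flush_queues() comes from this
 * patch; the surrounding wiring is an assumed sketch.
 */
static void rtw_ops_flush(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  u32 queues, bool drop)
{
	struct rtw_dev *rtwdev = hw->priv;

	mutex_lock(&rtwdev->mutex);
	/* Wait for the mapped priority queues to drain (up to ~100ms
	 * each); a warning is logged on timeout unless drop is set.
	 */
	rtw_mac_flush_queues(rtwdev, queues, drop);
	mutex_unlock(&rtwdev->mutex);
}

Under this assumed wiring, mac80211 would invoke the callback before operations such as a channel switch or suspend, with queues covering all ACs, which takes the flush-everything path in rtw_mac_flush_queues().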