Diffstat (limited to 'drivers/net/wireless/realtek/rtw88/pci.c')
 drivers/net/wireless/realtek/rtw88/pci.c | 509 ++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 399 insertions(+), 110 deletions(-)
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 8228db9a5fc8..56b16186d3aa 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -12,12 +12,16 @@
 #include "fw.h"
 #include "ps.h"
 #include "debug.h"
+#include "mac.h"
 
 static bool rtw_disable_msi;
+static bool rtw_pci_disable_aspm;
 module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
+module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
 MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
+MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
 
-static u32 rtw_pci_tx_queue_idx_addr[] = {
+static const u32 rtw_pci_tx_queue_idx_addr[] = {
 	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
 	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
 	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
@@ -27,7 +31,8 @@ static u32 rtw_pci_tx_queue_idx_addr[] = {
 	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
 };
 
-static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
+static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb,
+			      enum rtw_tx_queue_type queue)
 {
 	switch (queue) {
 	case RTW_TX_QUEUE_BCN:
@@ -85,13 +90,6 @@ static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
 	writel(val, rtwpci->mmap + addr);
 }
 
-static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
-{
-	int offset = tx_ring->r.desc_size * idx;
-
-	return tx_ring->r.head + offset;
-}
-
 static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
 				      struct rtw_pci_tx_ring *tx_ring)
 {
@@ -106,7 +104,7 @@ static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
 		tx_data = rtw_pci_get_tx_data(skb);
 		dma = tx_data->dma;
 
-		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
 	}
 }
@@ -122,7 +120,7 @@ static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
 	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
 
 	/* free the ring itself */
-	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
+	dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
 	tx_ring->r.head = NULL;
 }
 
@@ -141,7 +139,7 @@ static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
 			continue;
 
 		dma = *((dma_addr_t *)skb->cb);
-		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
 		dev_kfree_skb(skb);
 		rx_ring->buf[i] = NULL;
 	}
@@ -156,7 +154,7 @@ static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
 
 	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
 
-	pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
+	dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
 }
 
 static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
@@ -191,7 +189,7 @@ static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
 		return -EINVAL;
 	}
 
-	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
+	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
 	if (!head) {
 		rtw_err(rtwdev, "failed to allocate tx ring\n");
 		return -ENOMEM;
@@ -220,8 +218,8 @@ static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
 	if (!skb)
 		return -EINVAL;
 
-	dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(pdev, dma))
+	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&pdev->dev, dma))
 		return -EBUSY;
 
 	*((dma_addr_t *)skb->cb) = dma;
@@ -264,12 +262,7 @@ static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
 	int i, allocated;
 	int ret = 0;
 
-	if (len > TRX_BD_IDX_MASK) {
-		rtw_err(rtwdev, "len %d exceeds maximum RX entries\n", len);
-		return -EINVAL;
-	}
-
-	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
+	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
 	if (!head) {
 		rtw_err(rtwdev, "failed to allocate rx ring\n");
 		return -ENOMEM;
@@ -308,11 +301,11 @@ err_out:
 		if (!skb)
 			continue;
 		dma = *((dma_addr_t *)skb->cb);
-		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		rx_ring->buf[i] = NULL;
 	}
-	pci_free_consistent(pdev, ring_sz, head, dma);
+	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
 
 	rtw_err(rtwdev, "failed to init rx buffer\n");
 
@@ -324,7 +317,7 @@ static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 	struct rtw_pci_tx_ring *tx_ring;
 	struct rtw_pci_rx_ring *rx_ring;
-	struct rtw_chip_info *chip = rtwdev->chip;
+	const struct rtw_chip_info *chip = rtwdev->chip;
 	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
 	int tx_desc_size, rx_desc_size;
 	u32 len;
@@ -386,6 +379,7 @@ static int rtw_pci_init(struct rtw_dev *rtwdev)
 			    IMR_VODOK |
 			    IMR_ROK |
 			    IMR_BCNDMAINT_E |
+			    IMR_C2HCMD |
 			    0;
 	rtwpci->irq_mask[1] = IMR_TXFOVW |
 			      0;
@@ -411,7 +405,7 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
 	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
 	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
 
-	if (!rtw_chip_wcpu_11n(rtwdev)) {
+	if (!rtw_chip_wcpu_8051(rtwdev)) {
 		len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
 		dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
 		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
@@ -473,7 +467,7 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
 	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
 
 	/* reset H2C Queue index in a single write */
-	if (rtw_chip_wcpu_11ac(rtwdev))
+	if (rtw_chip_wcpu_3081(rtwdev))
 		rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
 				BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
 }
@@ -484,15 +478,16 @@ static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
 }
 
 static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
-				     struct rtw_pci *rtwpci)
+				     struct rtw_pci *rtwpci, bool exclude_rx)
 {
 	unsigned long flags;
+	u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;
 
 	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
 
-	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
+	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
 	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
-	if (rtw_chip_wcpu_11ac(rtwdev))
+	if (rtw_chip_wcpu_3081(rtwdev))
 		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
 
 	rtwpci->irq_enabled = true;
@@ -512,7 +507,7 @@ static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
 
 	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
 	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
-	if (rtw_chip_wcpu_11ac(rtwdev))
+	if (rtw_chip_wcpu_3081(rtwdev))
 		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
 
 	rtwpci->irq_enabled = false;
@@ -542,7 +537,7 @@ static int rtw_pci_setup(struct rtw_dev *rtwdev)
 static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
 {
 	struct rtw_pci_tx_ring *tx_ring;
-	u8 queue;
+	enum rtw_tx_queue_type queue;
 
 	rtw_pci_reset_trx_ring(rtwdev);
 	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
@@ -551,12 +546,36 @@ static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
 	}
 }
 
+static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+	if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
+		return;
+
+	napi_enable(&rtwpci->napi);
+}
+
+static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+	if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
+		return;
+
+	napi_synchronize(&rtwpci->napi);
+	napi_disable(&rtwpci->napi);
+}
+
 static int rtw_pci_start(struct rtw_dev *rtwdev)
 {
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 
+	rtw_pci_napi_start(rtwdev);
+
 	spin_lock_bh(&rtwpci->irq_lock);
-	rtw_pci_enable_interrupt(rtwdev, rtwpci);
+	rtwpci->running = true;
+	rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
 	spin_unlock_bh(&rtwpci->irq_lock);
 
 	return 0;
@@ -565,9 +584,17 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
 static void rtw_pci_stop(struct rtw_dev *rtwdev)
 {
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+	struct pci_dev *pdev = rtwpci->pdev;
 
 	spin_lock_bh(&rtwpci->irq_lock);
+	rtwpci->running = false;
 	rtw_pci_disable_interrupt(rtwdev, rtwpci);
+	spin_unlock_bh(&rtwpci->irq_lock);
+
+	synchronize_irq(pdev->irq);
+	rtw_pci_napi_stop(rtwdev);
+
+	spin_lock_bh(&rtwpci->irq_lock);
 	rtw_pci_dma_release(rtwdev, rtwpci);
 	spin_unlock_bh(&rtwpci->irq_lock);
 }
@@ -576,8 +603,11 @@ static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
 {
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 	struct rtw_pci_tx_ring *tx_ring;
+	enum rtw_tx_queue_type queue;
 	bool tx_empty = true;
-	u8 queue;
+
+	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
+		goto enter_deep_ps;
 
 	lockdep_assert_held(&rtwpci->irq_lock);
 
@@ -604,7 +634,7 @@ static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
 			"TX path not empty, cannot enter deep power save state\n");
 		return;
 	}
-
+enter_deep_ps:
 	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
 	rtw_power_mode_change(rtwdev, true);
 }
@@ -634,32 +664,6 @@ static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
 	spin_unlock_bh(&rtwpci->irq_lock);
 }
 
-static u8 ac_to_hwq[] = {
-	[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
-	[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
-	[IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
-	[IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
-};
-
-static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	__le16 fc = hdr->frame_control;
-	u8 q_mapping = skb_get_queue_mapping(skb);
-	u8 queue;
-
-	if (unlikely(ieee80211_is_beacon(fc)))
-		queue = RTW_TX_QUEUE_BCN;
-	else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
-		queue = RTW_TX_QUEUE_MGMT;
-	else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
-		queue = ac_to_hwq[IEEE80211_AC_BE];
-	else
-		queue = ac_to_hwq[q_mapping];
-
-	return queue;
-}
-
 static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
 				      struct rtw_pci_tx_ring *ring)
 {
@@ -672,8 +676,7 @@ static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
 
 	tx_data = rtw_pci_get_tx_data(prev);
 	dma = tx_data->dma;
-	pci_unmap_single(rtwpci->pdev, dma, prev->len,
-			 PCI_DMA_TODEVICE);
+	dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
 
 	dev_kfree_skb_any(prev);
 }
@@ -682,7 +685,7 @@ static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
 			      u32 idx)
 {
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
-	struct rtw_chip_info *chip = rtwdev->chip;
+	const struct rtw_chip_info *chip = rtwdev->chip;
 	struct rtw_pci_rx_buffer_desc *buf_desc;
 	u32 desc_sz = chip->rx_buf_desc_sz;
 	u16 total_pkt_size;
@@ -698,7 +701,76 @@ static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
 	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
 }
 
-static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
+static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
+{
+	u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
+	u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);
+
+	return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
+}
+
+static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+	struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
+	u32 cur_rp;
+	u8 i;
+
+	/* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a
+	 * bit dynamic, it's hard to define a reasonable fixed total timeout
+	 * to use the read_poll_timeout* helpers. Instead, we can ensure a
+	 * reasonable number of polling attempts, so we just use a for loop
+	 * with udelay here.
+	 */
+	for (i = 0; i < 30; i++) {
+		cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
+		if (cur_rp == ring->r.wp)
+			return;
+
+		udelay(1);
+	}
+
+	if (!drop)
+		rtw_dbg(rtwdev, RTW_DBG_UNEXP,
			"timed out to flush pci tx ring[%d]\n", pci_q);
+}
+
+static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
+				   bool drop)
+{
+	u8 q;
+
+	for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
+		/* Unnecessary to flush BCN, H2C and HI tx queues. */
+		if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C ||
+		    q == RTW_TX_QUEUE_HI0)
+			continue;
+
+		if (pci_queues & BIT(q))
+			__pci_flush_queue(rtwdev, q, drop);
+	}
+}
+
+static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
+{
+	u32 pci_queues = 0;
+	u8 i;
+
+	/* If all of the hardware queues are requested to flush,
+	 * flush all of the pci queues.
+	 */
+	if (queues == BIT(rtwdev->hw->queues) - 1) {
+		pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
+	} else {
+		for (i = 0; i < rtwdev->hw->queues; i++)
+			if (queues & BIT(i))
+				pci_queues |= BIT(rtw_tx_ac_to_hwq(i));
+	}
+
+	__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
+}
+
+static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev,
+				      enum rtw_tx_queue_type queue)
 {
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 	struct rtw_pci_tx_ring *ring;
@@ -708,7 +780,8 @@ static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
 	bd_idx = rtw_pci_tx_queue_idx_addr[queue];
 
 	spin_lock_bh(&rtwpci->irq_lock);
-	rtw_pci_deep_ps_leave(rtwdev);
+	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
+		rtw_pci_deep_ps_leave(rtwdev);
 	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
 	spin_unlock_bh(&rtwpci->irq_lock);
 }
@@ -716,7 +789,7 @@ static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
 static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
 {
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
-	u8 queue;
+	enum rtw_tx_queue_type queue;
 
 	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
 		if (test_and_clear_bit(queue, rtwpci->tx_queued))
@@ -725,10 +798,11 @@ static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
 
 static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
 				 struct rtw_tx_pkt_info *pkt_info,
-				 struct sk_buff *skb, u8 queue)
+				 struct sk_buff *skb,
+				 enum rtw_tx_queue_type queue)
 {
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
-	struct rtw_chip_info *chip = rtwdev->chip;
+	const struct rtw_chip_info *chip = rtwdev->chip;
 	struct rtw_pci_tx_ring *ring;
 	struct rtw_pci_tx_data *tx_data;
 	dma_addr_t dma;
@@ -751,10 +825,10 @@ static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
 	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
 	memset(pkt_desc, 0, tx_pkt_desc_sz);
 	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
-	rtw_tx_fill_tx_desc(pkt_info, skb);
-	dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
-			     PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(rtwpci->pdev, dma))
+	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
+	dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
+			     DMA_TO_DEVICE);
+	if (dma_mapping_error(&rtwpci->pdev->dev, dma))
 		return -EBUSY;
 
 	/* after this we got dma mapped, there is no way back */
@@ -843,9 +917,9 @@ static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
 			    struct rtw_tx_pkt_info *pkt_info,
 			    struct sk_buff *skb)
 {
+	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 	struct rtw_pci_tx_ring *ring;
-	u8 queue = rtw_hw_queue_mapping(skb);
 	int ret;
 
 	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
@@ -853,10 +927,12 @@ static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
 		return ret;
 
 	ring = &rtwpci->tx_rings[queue];
+	spin_lock_bh(&rtwpci->irq_lock);
 	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
 		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
 		ring->queue_stopped = true;
 	}
+	spin_unlock_bh(&rtwpci->irq_lock);
 
 	return 0;
 }
@@ -871,7 +947,7 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 	struct sk_buff *skb;
 	u32 count;
 	u32 bd_idx_addr;
-	u32 bd_idx, cur_rp;
+	u32 bd_idx, cur_rp, rp_idx;
 	u16 q_map;
 
 	ring = &rtwpci->tx_rings[hw_queue];
@@ -880,6 +956,7 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
 	cur_rp = bd_idx >> 16;
 	cur_rp &= TRX_BD_IDX_MASK;
+	rp_idx = ring->r.rp;
 	if (cur_rp >= ring->r.rp)
 		count = cur_rp - ring->r.rp;
 	else
@@ -893,8 +970,8 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 			break;
 		}
 		tx_data = rtw_pci_get_tx_data(skb);
-		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
-				 PCI_DMA_TODEVICE);
+		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
+				 DMA_TO_DEVICE);
 
 		/* just free command packets from host to card */
 		if (hw_queue == RTW_TX_QUEUE_H2C) {
@@ -903,12 +980,15 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 		}
 
 		if (ring->queue_stopped &&
-		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
+		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
 			q_map = skb_get_queue_mapping(skb);
 			ieee80211_wake_queue(hw, q_map);
 			ring->queue_stopped = false;
 		}
 
+		if (++rp_idx >= ring->r.len)
+			rp_idx = 0;
+
 		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
 
 		info = IEEE80211_SKB_CB(skb);
@@ -932,16 +1012,43 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 	ring->r.rp = cur_rp;
 }
 
-static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
-			   u8 hw_queue)
+static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+	struct napi_struct *napi = &rtwpci->napi;
+
+	napi_schedule(napi);
+}
+
+static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
+				     struct rtw_pci *rtwpci)
 {
-	struct rtw_chip_info *chip = rtwdev->chip;
 	struct rtw_pci_rx_ring *ring;
+	int count = 0;
+	u32 tmp, cur_wp;
+
+	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
+	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
+	cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
+	if (cur_wp >= ring->r.wp)
+		count = cur_wp - ring->r.wp;
+	else
+		count = ring->r.len - (ring->r.wp - cur_wp);
+
+	return count;
+}
+
+static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
+			   u8 hw_queue, u32 limit)
+{
+	const struct rtw_chip_info *chip = rtwdev->chip;
+	struct napi_struct *napi = &rtwpci->napi;
+	struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
 	struct rtw_rx_pkt_stat pkt_stat;
 	struct ieee80211_rx_status rx_status;
 	struct sk_buff *skb, *new;
-	u32 cur_wp, cur_rp, tmp;
-	u32 count;
+	u32 cur_rp = ring->r.rp;
+	u32 count, rx_done = 0;
 	u32 pkt_offset;
 	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
 	u32 buf_desc_sz = chip->rx_buf_desc_sz;
@@ -949,17 +1056,9 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 	u8 *rx_desc;
 	dma_addr_t dma;
 
-	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
+	count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
+	count = min(count, limit);
 
-	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
-	cur_wp = tmp >> 16;
-	cur_wp &= TRX_BD_IDX_MASK;
-	if (cur_wp >= ring->r.wp)
-		count = cur_wp - ring->r.wp;
-	else
-		count = ring->r.len - (ring->r.wp - cur_wp);
-
-	cur_rp = ring->r.rp;
 	while (count--) {
 		rtw_pci_dma_check(rtwdev, ring, cur_rp);
 		skb = ring->buf[cur_rp];
@@ -967,7 +1066,7 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
 					DMA_FROM_DEVICE);
 		rx_desc = skb->data;
-		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
+		rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
 
 		/* offset from rx_desc to payload */
 		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
@@ -990,9 +1089,11 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 			/* remove rx_desc */
 			skb_pull(new, pkt_offset);
 
+			rtw_update_rx_freq_for_invalid(rtwdev, new, &rx_status, &pkt_stat);
 			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
 			memcpy(new->cb, &rx_status, sizeof(rx_status));
-			ieee80211_rx_irqsafe(rtwdev->hw, new);
+			ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
+			rx_done++;
 		}
 
 next_rp:
@@ -1006,8 +1107,13 @@ next_rp:
 	}
 
 	ring->r.rp = cur_rp;
-	ring->r.wp = cur_wp;
+	/* 'rp', the last position we have read, is seen as the previous
+	 * position of 'wp' that is used to calculate 'count' next time.
+	 */
+	ring->r.wp = cur_rp;
 	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
+
+	return rx_done;
 }
 
 static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
@@ -1019,7 +1125,7 @@ static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
 
 	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
 	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
-	if (rtw_chip_wcpu_11ac(rtwdev))
+	if (rtw_chip_wcpu_3081(rtwdev))
 		irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
 	else
 		irq_status[3] = 0;
@@ -1028,7 +1134,7 @@ static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
 	irq_status[3] &= rtwpci->irq_mask[3];
 	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
 	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
-	if (rtw_chip_wcpu_11ac(rtwdev))
+	if (rtw_chip_wcpu_3081(rtwdev))
 		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
 
 	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
@@ -1057,6 +1163,7 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
 	struct rtw_dev *rtwdev = dev;
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 	u32 irq_status[4];
+	bool rx = false;
 
 	spin_lock_bh(&rtwpci->irq_lock);
 	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
@@ -1075,11 +1182,16 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
 		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
 	if (irq_status[3] & IMR_H2CDOK)
 		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
-	if (irq_status[0] & IMR_ROK)
-		rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
+	if (irq_status[0] & IMR_ROK) {
+		rtw_pci_rx_isr(rtwdev);
+		rx = true;
+	}
+	if (unlikely(irq_status[0] & IMR_C2HCMD))
+		rtw_fw_c2h_cmd_isr(rtwdev);
 
 	/* all of the jobs for this interrupt have been done */
-	rtw_pci_enable_interrupt(rtwdev, rtwpci);
+	if (rtwpci->running)
+		rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
 	spin_unlock_bh(&rtwpci->irq_lock);
 
 	return IRQ_HANDLED;
@@ -1200,6 +1312,9 @@ static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
 	u8 value;
 	int ret;
 
+	if (rtw_pci_disable_aspm)
+		return;
+
 	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
 	if (ret) {
 		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
@@ -1214,11 +1329,33 @@ static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
 	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
 }
 
+static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
+{
+	u8 value;
+	int ret;
+
+	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
+	if (ret) {
+		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
+		return;
+	}
+
+	if (enable)
+		value &= ~BIT_CLKREQ_N_PAD;
+	else
+		value |= BIT_CLKREQ_N_PAD;
+
+	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
+}
+
 static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
 {
 	u8 value;
 	int ret;
 
+	if (rtw_pci_disable_aspm)
+		return;
+
 	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
 	if (ret) {
 		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
@@ -1247,13 +1384,17 @@ static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
 	 * throughput. This is probably because the ASPM behavior slightly
	 * varies from different SOC.
 	 */
-	if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
+	if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
+		return;
+
+	if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
+	    (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
 		rtw_pci_aspm_set(rtwdev, enter);
 }
 
 static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
 {
-	struct rtw_chip_info *chip = rtwdev->chip;
+	const struct rtw_chip_info *chip = rtwdev->chip;
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 	struct pci_dev *pdev = rtwpci->pdev;
 	u16 link_ctrl;
@@ -1295,7 +1436,7 @@ static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
 
 static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
 {
-	struct rtw_chip_info *chip = rtwdev->chip;
+	const struct rtw_chip_info *chip = rtwdev->chip;
 
 	switch (chip->id) {
 	case RTW_CHIP_TYPE_8822C:
@@ -1310,12 +1451,16 @@ static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
 
 static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
 {
-	struct rtw_chip_info *chip = rtwdev->chip;
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+	const struct rtw_chip_info *chip = rtwdev->chip;
+	struct rtw_efuse *efuse = &rtwdev->efuse;
+	struct pci_dev *pdev = rtwpci->pdev;
 	const struct rtw_intf_phy_para *para;
 	u16 cut;
 	u16 value;
 	u16 offset;
 	int i;
+	int ret;
 
 	cut = BIT(0) << rtwdev->hal.cut_version;
 
@@ -1348,15 +1493,41 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
 	}
 
 	rtw_pci_link_cfg(rtwdev);
+
+	/* Disable 8821ce completion timeout by default */
+	if (chip->id == RTW_CHIP_TYPE_8821C) {
+		ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+					       PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
+		if (ret)
+			rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
+				ret);
+	}
+
+	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
+		rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
 }
 
 static int __maybe_unused rtw_pci_suspend(struct device *dev)
 {
+	struct ieee80211_hw *hw = dev_get_drvdata(dev);
+	struct rtw_dev *rtwdev = hw->priv;
+	const struct rtw_chip_info *chip = rtwdev->chip;
+	struct rtw_efuse *efuse = &rtwdev->efuse;
+
+	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
+		rtw_pci_clkreq_pad_low(rtwdev, true);
 	return 0;
 }
 
 static int __maybe_unused rtw_pci_resume(struct device *dev)
 {
+	struct ieee80211_hw *hw = dev_get_drvdata(dev);
+	struct rtw_dev *rtwdev = hw->priv;
+	const struct rtw_chip_info *chip = rtwdev->chip;
+	struct rtw_efuse *efuse = &rtwdev->efuse;
+
+	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
+		rtw_pci_clkreq_pad_low(rtwdev, false);
 	return 0;
 }
 
@@ -1382,7 +1553,6 @@ static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
 
 static void rtw_pci_declaim(struct rtw_dev *rtwdev,
 			    struct pci_dev *pdev)
 {
-	pci_clear_master(pdev);
 	pci_disable_device(pdev);
 }
 
@@ -1422,15 +1592,18 @@ static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
 	rtw_pci_io_unmapping(rtwdev, pdev);
 }
 
-static struct rtw_hci_ops rtw_pci_ops = {
+static const struct rtw_hci_ops rtw_pci_ops = {
 	.tx_write = rtw_pci_tx_write,
 	.tx_kick_off = rtw_pci_tx_kick_off,
+	.flush_queues = rtw_pci_flush_queues,
 	.setup = rtw_pci_setup,
 	.start = rtw_pci_start,
 	.stop = rtw_pci_stop,
 	.deep_ps = rtw_pci_deep_ps,
 	.link_ps = rtw_pci_link_ps,
 	.interface_cfg = rtw_pci_interface_cfg,
+	.dynamic_rx_agg = NULL,
+	.write_firmware_page = rtw_write_firmware_page,
 
 	.read8 = rtw_pci_read8,
 	.read16 = rtw_pci_read16,
@@ -1444,7 +1617,7 @@ static const struct rtw_hci_ops rtw_pci_ops = {
 
 static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
 {
-	unsigned int flags = PCI_IRQ_LEGACY;
+	unsigned int flags = PCI_IRQ_INTX;
 	int ret;
 
 	if (!rtw_disable_msi)
@@ -1474,11 +1647,110 @@ static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
 	pci_free_irq_vectors(pdev);
 }
 
+static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
+	struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
+					      priv);
+	int work_done = 0;
+
+	if (rtwpci->rx_no_aspm)
+		rtw_pci_link_ps(rtwdev, false);
+
+	while (work_done < budget) {
+		u32 work_done_once;
+
+		work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
+						 budget - work_done);
+		if (work_done_once == 0)
+			break;
+		work_done += work_done_once;
+	}
+	if (work_done < budget) {
+		napi_complete_done(napi, work_done);
+		spin_lock_bh(&rtwpci->irq_lock);
+		if (rtwpci->running)
+			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
+		spin_unlock_bh(&rtwpci->irq_lock);
+		/* When an IRQ fires during polling and before napi_complete()
+		 * while no further data is received, data on the DMA ring
+		 * will not be processed immediately. Check whether the DMA
+		 * ring is empty and perform napi_schedule() accordingly.
+		 */
+		if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
+			napi_schedule(napi);
+	}
+	if (rtwpci->rx_no_aspm)
+		rtw_pci_link_ps(rtwdev, true);
+
+	return work_done;
+}
+
+static int rtw_pci_napi_init(struct rtw_dev *rtwdev)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+	rtwpci->netdev = alloc_netdev_dummy(0);
+	if (!rtwpci->netdev)
+		return -ENOMEM;
+
+	netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
+	return 0;
+}
+
+static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+	rtw_pci_napi_stop(rtwdev);
+	netif_napi_del(&rtwpci->napi);
+	free_netdev(rtwpci->netdev);
+}
+
+static pci_ers_result_t rtw_pci_io_err_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+
+	netif_device_detach(netdev);
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t rtw_pci_io_slot_reset(struct pci_dev *pdev)
+{
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct rtw_dev *rtwdev = hw->priv;
+
+	rtw_fw_recovery(rtwdev);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void rtw_pci_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+
+	/* ack any pending wake events, disable PME */
+	pci_enable_wake(pdev, PCI_D0, 0);
+
+	netif_device_attach(netdev);
+}
+
+const struct pci_error_handlers rtw_pci_err_handler = {
+	.error_detected = rtw_pci_io_err_detected,
+	.slot_reset = rtw_pci_io_slot_reset,
+	.resume = rtw_pci_io_resume,
+};
+EXPORT_SYMBOL(rtw_pci_err_handler);
+
 int rtw_pci_probe(struct pci_dev *pdev,
 		  const struct pci_device_id *id)
 {
+	struct pci_dev *bridge = pci_upstream_bridge(pdev);
 	struct ieee80211_hw *hw;
 	struct rtw_dev *rtwdev;
+	struct rtw_pci *rtwpci;
 	int drv_data_size;
 	int ret;
 
@@ -1496,6 +1768,9 @@ int rtw_pci_probe(struct pci_dev *pdev,
 	rtwdev->hci.ops = &rtw_pci_ops;
 	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
 
+	rtwpci = (struct rtw_pci *)rtwdev->priv;
+	atomic_set(&rtwpci->link_usage, 1);
+
 	ret = rtw_core_init(rtwdev);
 	if (ret)
 		goto err_release_hw;
@@ -1516,12 +1791,22 @@ int rtw_pci_probe(struct pci_dev *pdev,
 		goto err_pci_declaim;
 	}
 
+	ret = rtw_pci_napi_init(rtwdev);
+	if (ret) {
+		rtw_err(rtwdev, "failed to setup NAPI\n");
+		goto err_pci_declaim;
+	}
+
 	ret = rtw_chip_info_setup(rtwdev);
 	if (ret) {
 		rtw_err(rtwdev, "failed to setup chip information\n");
 		goto err_destroy_pci;
 	}
 
+	/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
+	if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
+		rtwpci->rx_no_aspm = true;
+
 	rtw_pci_phy_cfg(rtwdev);
 
 	ret = rtw_register_hw(rtwdev, hw);
@@ -1539,6 +1824,7 @@ int rtw_pci_probe(struct pci_dev *pdev,
 	return 0;
 
 err_destroy_pci:
+	rtw_pci_napi_deinit(rtwdev);
 	rtw_pci_destroy(rtwdev, pdev);
 
 err_pci_declaim:
@@ -1568,6 +1854,7 @@ void rtw_pci_remove(struct pci_dev *pdev)
 
 	rtw_unregister_hw(rtwdev, hw);
 	rtw_pci_disable_interrupt(rtwdev, rtwpci);
+	rtw_pci_napi_deinit(rtwdev);
 	rtw_pci_destroy(rtwdev, pdev);
 	rtw_pci_declaim(rtwdev, pdev);
 	rtw_pci_free_irq(rtwdev, pdev);
@@ -1580,7 +1867,7 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
 {
 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
 	struct rtw_dev *rtwdev;
-	struct rtw_chip_info *chip;
+	const struct rtw_chip_info *chip;
 
 	if (!hw)
 		return;
@@ -1590,9 +1877,11 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
 
 	if (chip->ops->shutdown)
 		chip->ops->shutdown(rtwdev);
+
+	pci_set_power_state(pdev, PCI_D3hot);
 }
 EXPORT_SYMBOL(rtw_pci_shutdown);
 
 MODULE_AUTHOR("Realtek Corporation");
-MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
+MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
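
For context: the recurring pci_* -> dma_* replacements above follow the kernel-wide removal of the legacy PCI DMA wrappers in favor of the generic DMA API (the dma_* helpers take the underlying struct device instead of the pci_dev), and PCI_IRQ_LEGACY was later renamed PCI_IRQ_INTX. A minimal sketch of the mapping pattern, with hypothetical names (my_map_tx_buf is illustrative only, not a function from this file):

	/* Map a TX buffer for the device, mirroring the pattern applied in the
	 * patch: &pdev->dev replaces the bare pci_dev, DMA_TO_DEVICE replaces
	 * PCI_DMA_TODEVICE, and failures are checked with dma_mapping_error()
	 * instead of pci_dma_mapping_error().
	 */
	static int my_map_tx_buf(struct pci_dev *pdev, void *buf, size_t len,
				 dma_addr_t *dma)
	{
		*dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, *dma))
			return -EBUSY;	/* same error convention the driver uses */

		return 0;
	}

The unmap side is symmetric: dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE) replaces pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE).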
