Diffstat (limited to 'drivers/net/wireless/realtek/rtw88/pci.c')
-rw-r--r--  drivers/net/wireless/realtek/rtw88/pci.c | 236
1 file changed, 213 insertions, 23 deletions
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index d90928be663b..a58e8276a41a 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -9,6 +9,7 @@
#include "tx.h"
#include "rx.h"
#include "fw.h"
+#include "ps.h"
#include "debug.h"
static bool rtw_disable_msi;
@@ -457,9 +458,9 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
/* reset read/write point */
rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
- /* rest H2C Queue index */
- rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
- rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
+ /* reset H2C Queue index in a single write */
+ rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
+ BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
@@ -536,6 +537,69 @@ static void rtw_pci_stop(struct rtw_dev *rtwdev)
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
+static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *tx_ring;
+ bool tx_empty = true;
+ u8 queue;
+
+ lockdep_assert_held(&rtwpci->irq_lock);
+
+ /* Deep PS state is not allowed to TX-DMA */
+ for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
+ /* BCN queue is rsvd page, does not have DMA interrupt
+ * H2C queue is managed by firmware
+ */
+ if (queue == RTW_TX_QUEUE_BCN ||
+ queue == RTW_TX_QUEUE_H2C)
+ continue;
+
+ tx_ring = &rtwpci->tx_rings[queue];
+
+ /* check if there is any skb DMAing */
+ if (skb_queue_len(&tx_ring->queue)) {
+ tx_empty = false;
+ break;
+ }
+ }
+
+ if (!tx_empty) {
+ rtw_dbg(rtwdev, RTW_DBG_PS,
+ "TX path not empty, cannot enter deep power save state\n");
+ return;
+ }
+
+ set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
+ rtw_power_mode_change(rtwdev, true);
+}
+
+static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ lockdep_assert_held(&rtwpci->irq_lock);
+
+ if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_power_mode_change(rtwdev, false);
+}
+
+static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+ if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_pci_deep_ps_enter(rtwdev);
+
+ if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_pci_deep_ps_leave(rtwdev);
+
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
static u8 ac_to_hwq[] = {
[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
@@ -616,6 +680,7 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
u8 *pkt_desc;
struct rtw_pci_tx_buffer_desc *buf_desc;
u32 bd_idx;
+ unsigned long flags;
ring = &rtwpci->tx_rings[queue];
@@ -651,6 +716,10 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
tx_data = rtw_pci_get_tx_data(skb);
tx_data->dma = dma;
tx_data->sn = pkt_info->sn;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+ rtw_pci_deep_ps_leave(rtwdev);
skb_queue_tail(&ring->queue, skb);
/* kick off tx queue */
@@ -666,6 +735,7 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
}
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
return 0;
}
@@ -990,23 +1060,49 @@ static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
u16 write_addr;
- u16 remainder = addr & 0x3;
+ u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
u8 flag;
- u8 cnt = 20;
+ u8 cnt;
- write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
+ write_addr = addr & BITS_DBI_ADDR_MASK;
+ write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
- rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);
+ rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
+
+ for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
+ flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
+ if (flag == 0)
+ return;
- flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
- while (flag && (cnt != 0)) {
udelay(10);
+ }
+
+ WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
+}
+
+static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
+{
+ u16 read_addr = addr & BITS_DBI_ADDR_MASK;
+ u8 flag;
+ u8 cnt;
+
+ rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
+ rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
+
+ for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
- cnt--;
+ if (flag == 0) {
+ read_addr = REG_DBI_RDATA_V1 + (addr & 3);
+ *value = rtw_read8(rtwdev, read_addr);
+ return 0;
+ }
+
+ udelay(10);
}
- WARN(flag, "DBI write fail\n");
+ WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
+ return -EIO;
}
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
@@ -1017,23 +1113,113 @@ static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
rtw_write16(rtwdev, REG_MDIO_V1, data);
- page = addr < 0x20 ? 0 : 1;
- page += g1 ? 0 : 2;
- rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
+ page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
+ page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
+ rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
-
rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
- wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);
- cnt = 20;
- while (wflag && (cnt != 0)) {
- udelay(10);
+ for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
BIT_MDIO_WFLAG_V1);
- cnt--;
+ if (wflag == 0)
+ return;
+
+ udelay(10);
+ }
+
+ WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
+}
+
+static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 value;
+ int ret;
+
+ ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
+ return;
}
- WARN(wflag, "MDIO write fail\n");
+ if (enable)
+ value |= BIT_CLKREQ_SW_EN;
+ else
+ value &= ~BIT_CLKREQ_SW_EN;
+
+ rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
+}
+
+static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 value;
+ int ret;
+
+ ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
+ return;
+ }
+
+ if (enable)
+ value |= BIT_L1_SW_EN;
+ else
+ value &= ~BIT_L1_SW_EN;
+
+ rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
+}
+
+static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ /* Like CLKREQ, ASPM is also implemented by two HW modules, and can
+ * only be enabled when host supports it.
+ *
+ * And ASPM mechanism should be enabled when driver/firmware enters
+ * power save mode, without having heavy traffic. Because we've
+ * experienced some inter-operability issues that the link tends
+ * to enter L1 state on the fly even when driver is having high
+ * throughput. This is probably because the ASPM behavior slightly
+ * varies from different SOC.
+ */
+ if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
+ rtw_pci_aspm_set(rtwdev, enter);
+}
+
+static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u16 link_ctrl;
+ int ret;
+
+ /* Though there is standard PCIE configuration space to set the
+ * link control register, but by Realtek's design, driver should
+ * check if host supports CLKREQ/ASPM to enable the HW module.
+ *
+ * These functions are implemented by two HW modules associated,
+ * one is responsible to access PCIE configuration space to
+ * follow the host settings, and another is in charge of doing
+ * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes
+ * the host does not support it, and due to some reasons or wrong
+ * settings (ex. CLKREQ# not Bi-Direction), it could lead to device
+ * loss if HW misbehaves on the link.
+ *
+ * Hence it's designed that driver should first check the PCIE
+ * configuration space is sync'ed and enabled, then driver can turn
+ * on the other module that is actually working on the mechanism.
+ */
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
+ return;
+ }
+
+ if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
+ rtw_pci_clkreq_set(rtwdev, true);
+
+ rtwpci->link_ctrl = link_ctrl;
}
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
@@ -1074,6 +1260,8 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
else
rtw_dbi_write8(rtwdev, offset, value);
}
+
+ rtw_pci_link_cfg(rtwdev);
}
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
@@ -1120,8 +1308,6 @@ static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
goto err_io_unmap;
}
- rtw_pci_phy_cfg(rtwdev);
-
return 0;
err_io_unmap:
@@ -1142,6 +1328,8 @@ static struct rtw_hci_ops rtw_pci_ops = {
.setup = rtw_pci_setup,
.start = rtw_pci_start,
.stop = rtw_pci_stop,
+ .deep_ps = rtw_pci_deep_ps,
+ .link_ps = rtw_pci_link_ps,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
@@ -1233,6 +1421,8 @@ static int rtw_pci_probe(struct pci_dev *pdev,
goto err_destroy_pci;
}
+ rtw_pci_phy_cfg(rtwdev);
+
ret = rtw_register_hw(rtwdev, hw);
if (ret) {
rtw_err(rtwdev, "failed to register hw\n");