Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/mt7996/mac.c')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 1927
1 file changed, 1262 insertions, 665 deletions
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 130eb7b4fd91..2560e2f46e89 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2022 MediaTek Inc.
*/
@@ -13,10 +13,6 @@
#define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2)
-#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
-#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
- IEEE80211_RADIOTAP_HE_##f)
-
static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
.radar_pattern = {
@@ -57,26 +53,45 @@ static const struct mt7996_dfs_radar_spec jp_radar_specs = {
};
static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
- u16 idx, bool unicast)
+ u16 idx, u8 band_idx)
{
- struct mt7996_sta *sta;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_sta *msta;
+ struct mt7996_vif *mvif;
struct mt76_wcid *wcid;
+ int i;
+
+ wcid = mt76_wcid_ptr(dev, idx);
+ if (!wcid || !wcid->sta)
+ return NULL;
- if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ if (!mt7996_band_valid(dev, band_idx))
return NULL;
- wcid = rcu_dereference(dev->mt76.wcid[idx]);
- if (unicast || !wcid)
+ if (wcid->phy_idx == band_idx)
return wcid;
- if (!wcid->sta)
+ msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
+ msta = msta_link->sta;
+ if (!msta || !msta->vif)
return NULL;
- sta = container_of(wcid, struct mt7996_sta, wcid);
- if (!sta->vif)
- return NULL;
+ mvif = msta->vif;
+ for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
+ struct mt76_vif_link *mlink;
+
+ mlink = rcu_dereference(mvif->mt76.link[i]);
+ if (!mlink)
+ continue;
+
+ if (mlink->band_idx != band_idx)
+ continue;
- return &sta->vif->sta.wcid;
+ msta_link = rcu_dereference(msta->link[i]);
+ break;
+ }
+
+ return &msta_link->wcid;
}
bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
@@ -104,16 +119,18 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
[IEEE80211_AC_VI] = 4,
[IEEE80211_AC_VO] = 6
};
+ struct mt7996_sta_link *msta_link;
+ struct mt76_vif_link *mlink;
struct ieee80211_sta *sta;
struct mt7996_sta *msta;
- struct rate_info *rate;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
+ struct mt76_wcid *wcid;
int i;
- spin_lock_bh(&dev->sta_poll_lock);
- list_splice_init(&dev->sta_poll_list, &sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
rcu_read_lock();
@@ -122,32 +139,34 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
u32 addr, val;
u16 idx;
s8 rssi[4];
- u8 bw;
- spin_lock_bh(&dev->sta_poll_lock);
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
if (list_empty(&sta_poll_list)) {
- spin_unlock_bh(&dev->sta_poll_lock);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
break;
}
- msta = list_first_entry(&sta_poll_list,
- struct mt7996_sta, poll_list);
- list_del_init(&msta->poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
+ msta_link = list_first_entry(&sta_poll_list,
+ struct mt7996_sta_link,
+ wcid.poll_list);
+ msta = msta_link->sta;
+ wcid = &msta_link->wcid;
+ list_del_init(&wcid->poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
- idx = msta->wcid.idx;
+ idx = wcid->idx;
/* refresh peer's airtime reporting */
addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 tx_last = msta->airtime_ac[i];
- u32 rx_last = msta->airtime_ac[i + 4];
+ u32 tx_last = msta_link->airtime_ac[i];
+ u32 rx_last = msta_link->airtime_ac[i + 4];
- msta->airtime_ac[i] = mt76_rr(dev, addr);
- msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+ msta_link->airtime_ac[i] = mt76_rr(dev, addr);
+ msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+ tx_time[i] = msta_link->airtime_ac[i] - tx_last;
+ rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
@@ -158,10 +177,11 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
if (clear) {
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(msta_link->airtime_ac, 0,
+ sizeof(msta_link->airtime_ac));
}
- if (!msta->wcid.sta)
+ if (!wcid->sta)
continue;
sta = container_of((void *)msta, struct ieee80211_sta,
@@ -178,49 +198,6 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
}
- /* We don't support reading GI info from txs packets.
- * For accurate tx status reporting and AQL improvement,
- * we need to make sure that flags match so polling GI
- * from per-sta counters directly.
- */
- rate = &msta->wcid.rate;
-
- switch (rate->bw) {
- case RATE_INFO_BW_320:
- bw = IEEE80211_STA_RX_BW_320;
- break;
- case RATE_INFO_BW_160:
- bw = IEEE80211_STA_RX_BW_160;
- break;
- case RATE_INFO_BW_80:
- bw = IEEE80211_STA_RX_BW_80;
- break;
- case RATE_INFO_BW_40:
- bw = IEEE80211_STA_RX_BW_40;
- break;
- default:
- bw = IEEE80211_STA_RX_BW_20;
- break;
- }
-
- addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6);
- val = mt76_rr(dev, addr);
- if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
- addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 5);
- val = mt76_rr(dev, addr);
- rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
- } else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
- u8 offs = 24 + 2 * bw;
-
- rate->he_gi = (val & (0x3 << offs)) >> offs;
- } else if (rate->flags &
- (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
- if (val & BIT(12 + bw))
- rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
- else
- rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
- }
-
/* get signal strength of resp frames (CTS/BA/ACK) */
addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
val = mt76_rr(dev, addr);
@@ -230,211 +207,21 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
rssi[2] = to_rssi(GENMASK(23, 16), val);
rssi[3] = to_rssi(GENMASK(31, 24), val);
- msta->ack_signal =
- mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
-
- ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
- }
-
- rcu_read_unlock();
-}
-
-void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
- struct ieee80211_vif *vif, bool enable)
-{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- u32 addr;
-
- addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
- if (enable)
- mt76_set(dev, addr, BIT(5));
- else
- mt76_clear(dev, addr, BIT(5));
-}
-
-void mt7996_mac_set_fixed_rate_table(struct mt7996_dev *dev,
- u8 tbl_idx, u16 rate_idx)
-{
- u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx;
-
- mt76_wr(dev, MT_WTBL_ITDR0, rate_idx);
- /* use wtbl spe idx */
- mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL);
- mt76_wr(dev, MT_WTBL_ITCR, ctrl);
-}
-
-static void
-mt7996_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
- struct ieee80211_radiotap_he *he,
- __le32 *rxv)
-{
- u32 ru, offs = 0;
-
- ru = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC);
-
- status->bw = RATE_INFO_BW_HE_RU;
-
- switch (ru) {
- case 0 ... 36:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
- offs = ru;
- break;
- case 37 ... 52:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
- offs = ru - 37;
- break;
- case 53 ... 60:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- offs = ru - 53;
- break;
- case 61 ... 64:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
- offs = ru - 61;
- break;
- case 65 ... 66:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
- offs = ru - 65;
- break;
- case 67:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
- break;
- case 68:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
- break;
- }
-
- he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
- he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
- le16_encode_bits(offs,
- IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
-}
-
-static void
-mt7996_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- static const struct ieee80211_radiotap_he_mu mu_known = {
- .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
- HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN),
- .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
- };
- struct ieee80211_radiotap_he_mu *he_mu = NULL;
-
- status->flag |= RX_FLAG_RADIOTAP_HE_MU;
-
- he_mu = skb_push(skb, sizeof(mu_known));
- memcpy(he_mu, &mu_known, sizeof(mu_known));
-
-#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
+ mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
+ if (mlink) {
+ struct mt76_phy *mphy = mt76_vif_link_phy(mlink);
- he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
- if (status->he_dcm)
- he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
-
- he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
- MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
- le32_get_bits(rxv[4], MT_CRXV_HE_NUM_USER));
-
- he_mu->ru_ch1[0] = le32_get_bits(rxv[16], MT_CRXV_HE_RU0) & 0xff;
-
- if (status->bw >= RATE_INFO_BW_40) {
- he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
- he_mu->ru_ch2[0] = le32_get_bits(rxv[16], MT_CRXV_HE_RU1) & 0xff;
- }
-
- if (status->bw >= RATE_INFO_BW_80) {
- u32 ru_h, ru_l;
-
- he_mu->ru_ch1[1] = le32_get_bits(rxv[16], MT_CRXV_HE_RU2) & 0xff;
+ if (mphy)
+ msta_link->ack_signal =
+ mt76_rx_signal(mphy->antenna_mask,
+ rssi);
+ }
- ru_l = le32_get_bits(rxv[16], MT_CRXV_HE_RU3_L);
- ru_h = le32_get_bits(rxv[17], MT_CRXV_HE_RU3_H) & 0x7;
- he_mu->ru_ch2[1] = (u8)(ru_l | ru_h << 4);
+ ewma_avg_signal_add(&msta_link->avg_ack_signal,
+ -msta_link->ack_signal);
}
-}
-
-static void
-mt7996_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- static const struct ieee80211_radiotap_he known = {
- .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
- HE_BITS(DATA1_DATA_DCM_KNOWN) |
- HE_BITS(DATA1_STBC_KNOWN) |
- HE_BITS(DATA1_CODING_KNOWN) |
- HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
- HE_BITS(DATA1_DOPPLER_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
- HE_BITS(DATA1_BSS_COLOR_KNOWN),
- .data2 = HE_BITS(DATA2_GI_KNOWN) |
- HE_BITS(DATA2_TXBF_KNOWN) |
- HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
- HE_BITS(DATA2_TXOP_KNOWN),
- };
- struct ieee80211_radiotap_he *he = NULL;
- u32 ltf_size = le32_get_bits(rxv[4], MT_CRXV_HE_LTF_SIZE) + 1;
-
- status->flag |= RX_FLAG_RADIOTAP_HE;
-
- he = skb_push(skb, sizeof(known));
- memcpy(he, &known, sizeof(known));
-
- he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[9]) |
- HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[4]);
- he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[13]);
- he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[5]) |
- le16_encode_bits(ltf_size,
- IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
- if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
- he->data5 |= HE_BITS(DATA5_TXBF);
- he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[9]) |
- HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[9]);
-
- switch (mode) {
- case MT_PHY_TYPE_HE_SU:
- he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
- HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[8]) |
- HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]);
- break;
- case MT_PHY_TYPE_HE_EXT_SU:
- he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
- he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]);
- break;
- case MT_PHY_TYPE_HE_MU:
- he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]);
- he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[8]);
-
- mt7996_mac_decode_he_radiotap_ru(status, he, rxv);
- mt7996_mac_decode_he_mu_radiotap(skb, rxv);
- break;
- case MT_PHY_TYPE_HE_TB:
- he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
- HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
-
- he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[13]) |
- HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[13]) |
- HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[13]) |
- HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[13]);
-
- mt7996_mac_decode_he_radiotap_ru(status, he, rxv);
- break;
- default:
- break;
- }
+ rcu_read_unlock();
}
/* The HW does not translate the mac header to 802.3 for mesh point */
@@ -442,7 +229,9 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
- struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
+ struct mt7996_sta_link *msta_link = (void *)status->wcid;
+ struct mt7996_sta *msta = msta_link->sta;
+ struct ieee80211_bss_conf *link_conf;
__le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
@@ -459,8 +248,11 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
if (!msta || !msta->vif)
return -EINVAL;
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ sta = wcid_to_sta(status->wcid);
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ link_conf = rcu_dereference(vif->link_conf[msta_link->wcid.link_id]);
+ if (!link_conf)
+ return -EINVAL;
/* store the info from RXD and ethhdr to avoid being overridden */
frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
@@ -473,7 +265,7 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
switch (frame_control & (IEEE80211_FCTL_TODS |
IEEE80211_FCTL_FROMDS)) {
case 0:
- ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
+ ether_addr_copy(hdr.addr3, link_conf->bssid);
break;
case IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr.addr3, eth_hdr->h_source);
@@ -611,7 +403,9 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
case IEEE80211_STA_RX_BW_160:
status->bw = RATE_INFO_BW_160;
break;
+ /* rxv reports bw 320-1 and 320-2 separately */
case IEEE80211_STA_RX_BW_320:
+ case IEEE80211_STA_RX_BW_320 + 1:
status->bw = RATE_INFO_BW_320;
break;
default:
@@ -625,8 +419,36 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
return 0;
}
+static void
+mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
+ struct mt7996_sta *msta, struct sk_buff *skb,
+ u32 info)
+{
+ struct ieee80211_vif *vif;
+ struct wireless_dev *wdev;
+
+ if (!msta || !msta->vif)
+ return;
+
+ if (!mt76_queue_is_wed_rx(q))
+ return;
+
+ if (!(info & MT_DMA_INFO_PPE_VLD))
+ return;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
+ drv_priv);
+ wdev = ieee80211_vif_to_wdev(vif);
+ skb->dev = wdev->netdev;
+
+ mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
+ FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
+ FIELD_GET(MT_DMA_PPE_ENTRY, info));
+}
+
static int
-mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
+mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
+ struct sk_buff *skb, u32 *info)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -639,7 +461,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
- u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
u32 csum_status = *(u32 *)skb->cb;
u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
@@ -651,7 +473,10 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
u16 seq_ctrl = 0;
__le16 fc = 0;
int idx;
+ u8 hw_aggr = false;
+ struct mt7996_sta *msta = NULL;
+ hw_aggr = status->aggr;
memset(status, 0, sizeof(*status));
band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
@@ -675,16 +500,15 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
- status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
+ status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);
if (status->wcid) {
- struct mt7996_sta *msta;
+ struct mt7996_sta_link *msta_link;
- msta = container_of(status->wcid, struct mt7996_sta, wcid);
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
+ msta_link = container_of(status->wcid, struct mt7996_sta_link,
+ wcid);
+ msta = msta_link->sta;
+ mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
}
status->freq = mphy->chandef.chan->center_freq;
@@ -699,7 +523,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask &&
+ if ((rxd3 & csum_mask) == csum_mask &&
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -825,6 +649,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
}
+ /* IEEE 802.11 fragmentation can only be applied to unicast frames.
+ * Hence, drop fragments with multicast/broadcast RA.
+ * This check fixes vulnerabilities, like CVE-2020-26145.
+ */
+ if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
+ FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
+ return -EINVAL;
+
hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
if (hdr_trans && ieee80211_has_morefrags(fc)) {
if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
@@ -836,14 +668,19 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
skb_pull(skb, hdr_gap);
if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
pad_start = ieee80211_get_hdrlen_from_skb(skb);
- } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR) &&
- get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) {
+ } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
/* When header translation failure is indicated,
* the hardware will insert an extra 2-byte field
* containing the data length after the protocol
- * type field.
+ * type field. This happens either when the LLC-SNAP
+ * pattern did not match, or if a VLAN header was
+ * detected.
*/
- pad_start = 16;
+ pad_start = 12;
+ if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
+ pad_start += 4;
+ else
+ pad_start = 0;
}
if (pad_start) {
@@ -876,14 +713,28 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
+ skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
} else {
status->flag |= RX_FLAG_8023;
+ mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
+ *info);
+ mt76_npu_check_ppe(&dev->mt76, skb, *info);
}
- if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
- mt7996_mac_decode_he_radiotap(skb, rxv, mode);
+ if (rxv && !(status->flag & RX_FLAG_8023)) {
+ switch (status->encoding) {
+ case RX_ENC_EHT:
+ mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
+ break;
+ case RX_ENC_HE:
+ mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
+ break;
+ default:
+ break;
+ }
+ }
- if (!status->wcid || !ieee80211_is_data_qos(fc))
+ if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
return 0;
status->aggr = unicast &&
@@ -905,9 +756,8 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
u32 val;
if (wcid->sta) {
- struct ieee80211_sta *sta;
+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
- sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
wmm = sta->wme;
}
@@ -927,27 +777,40 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
txwi[2] |= cpu_to_le32(val);
+
+ if (wcid->amsdu)
+ txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}
static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct ieee80211_key_conf *key)
+ struct sk_buff *skb,
+ struct ieee80211_key_conf *key,
+ struct mt76_wcid *wcid)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- __le16 fc = hdr->frame_control;
+ __le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
+ u16 seqno = le16_to_cpu(sc);
+ bool hw_bigtk = false;
u8 fc_type, fc_stype;
u32 val;
if (ieee80211_is_action(fc) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK &&
- mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ if (is_mt7990(&dev->mt76))
+ txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
+ else
+ txwi[7] |= cpu_to_le32(MT_TXD7_MAC_TXD);
+
tid = MT_TX_ADDBA;
- else if (ieee80211_is_mgmt(hdr->frame_control))
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
tid = MT_TX_NORMAL;
+ }
val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
FIELD_PREP(MT_TXD1_HDR_INFO,
@@ -958,8 +821,11 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
info->flags & IEEE80211_TX_CTL_USE_MINRATE)
val |= MT_TXD1_FIXED_RATE;
- if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
- key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ if (is_mt7990(&dev->mt76) && ieee80211_is_beacon(fc) &&
+ (wcid->hw_key_idx2 == 6 || wcid->hw_key_idx2 == 7))
+ hw_bigtk = true;
+
+ if ((key && multicast && ieee80211_is_robust_mgmt_frame(skb)) || hw_bigtk) {
val |= MT_TXD1_BIP;
txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
}
@@ -972,6 +838,15 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+ if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
+ else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
+ else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
+ else
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);
+
txwi[2] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
@@ -980,9 +855,13 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
}
- if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+ if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
+ val = MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+ txwi[3] |= cpu_to_le32(val);
+ }
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
@@ -995,6 +874,19 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
txwi[3] |= cpu_to_le32(val);
txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
}
+
+ if (ieee80211_vif_is_mld(info->control.vif) &&
+ (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
+ txwi[5] |= cpu_to_le32(MT_TXD5_FL);
+
+ if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
+ ieee80211_vif_is_mld(info->control.vif)) {
+ txwi[5] |= cpu_to_le32(MT_TXD5_FL);
+ txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
+ }
+
+ if (!wcid->sta && ieee80211_is_mgmt(fc))
+ txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
}
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
@@ -1002,23 +894,40 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ struct mt76_vif_link *mlink = NULL;
+ struct mt7996_vif *mvif;
+ unsigned int link_id;
u16 tx_count = 15;
u32 val;
- bool beacon = !!(changed & (BSS_CHANGED_BEACON |
- BSS_CHANGED_BEACON_ENABLED));
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
BSS_CHANGED_FILS_DISCOVERY));
+ bool beacon = !!(changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);
+
+ if (wcid != &dev->mt76.global_wcid)
+ link_id = wcid->link_id;
+ else
+ link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
- if (vif) {
- omac_idx = mvif->mt76.omac_idx;
- wmm_idx = mvif->mt76.wmm_idx;
- band_idx = mvif->mt76.band_idx;
+ mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
+ if (mvif) {
+ if (wcid->offchannel)
+ mlink = rcu_dereference(mvif->mt76.offchannel_link);
+ if (!mlink)
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ }
+
+ if (mlink) {
+ omac_idx = mlink->omac_idx;
+ wmm_idx = mlink->wmm_idx;
+ band_idx = mlink->band_idx;
}
if (inband_disc) {
@@ -1056,8 +965,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD3_PROTECT_FRAME;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
val |= MT_TXD3_NO_ACK;
- if (wcid->amsdu)
- val |= MT_TXD3_HW_AMSDU;
txwi[3] = cpu_to_le32(val);
txwi[4] = 0;
@@ -1067,32 +974,67 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD5_TX_STATUS_HOST;
txwi[5] = cpu_to_le32(val);
- val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
- FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+ val = MT_TXD6_DAS | MT_TXD6_VTA;
+ if ((q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) ||
+ skb->protocol == cpu_to_be16(ETH_P_PAE))
+ val |= MT_TXD6_DIS_MAT;
+
+ if (is_mt7996(&dev->mt76))
+ val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+ else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
+ val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);
+
txwi[6] = cpu_to_le32(val);
txwi[7] = 0;
if (is_8023)
mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
else
- mt7996_mac_write_txwi_80211(dev, txwi, skb, key);
+ mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);
if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool mcast = ieee80211_is_data(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1);
- u8 idx = mvif->basic_rates_idx;
+ u8 idx = MT7996_BASIC_RATES_TBL;
- if (mcast && mvif->mcast_rates_idx)
- idx = mvif->mcast_rates_idx;
- else if (beacon && mvif->beacon_rates_idx)
- idx = mvif->beacon_rates_idx;
+ if (mlink) {
+ if (mcast && mlink->mcast_rates_idx)
+ idx = mlink->mcast_rates_idx;
+ else if (beacon && mlink->beacon_rates_idx)
+ idx = mlink->beacon_rates_idx;
+ else
+ idx = mlink->basic_rates_idx;
+ }
- txwi[6] |= FIELD_PREP(MT_TXD6_TX_RATE, idx);
+ val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
+ if (mcast)
+ val |= MT_TXD6_DIS_MAT;
+ txwi[6] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
}
}
+static bool
+mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return true;
+
+ /* for SDO to bypass specific data frame */
+ if (!mt7996_has_wa(dev)) {
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ return true;
+
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ !ieee80211_is_data_present(hdr->frame_control))
+ return true;
+ }
+
+ return false;
+}
+
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
@@ -1103,11 +1045,15 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_connac_txp_common *txp;
+ struct mt7996_vif *mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
+ struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL;
+ struct mt76_vif_link *mlink = NULL;
struct mt76_txwi_cache *t;
int id, i, pid, nbuf = tx_info->nbuf - 1;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ __le32 *ptr = (__le32 *)txwi_ptr;
u8 *txwi = (u8 *)txwi_ptr;
+ u8 link_id;
if (unlikely(tx_info->skb->len <= ETH_HLEN))
return -EINVAL;
@@ -1115,12 +1061,27 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (!wcid)
wcid = &dev->mt76.global_wcid;
- if (sta) {
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ if ((is_8023 || ieee80211_is_data_qos(hdr->frame_control)) && sta->mlo &&
+ likely(tx_info->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
+ u8 tid = tx_info->skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- if (time_after(jiffies, msta->jiffies + HZ / 4)) {
- info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->jiffies = jiffies;
+ link_id = (tid % 2) ? msta->seclink_id : msta->deflink_id;
+ } else {
+ link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+ }
+
+ if (link_id != wcid->link_id && link_id != IEEE80211_LINK_UNSPECIFIED) {
+ if (msta) {
+ struct mt7996_sta_link *msta_link =
+ rcu_dereference(msta->link[link_id]);
+
+ if (msta_link)
+ wcid = &msta_link->wcid;
+ } else if (mvif) {
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ if (mlink && mlink->wcid)
+ wcid = mlink->wcid;
}
}
@@ -1131,38 +1092,117 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (id < 0)
return id;
- pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
- pid, qid, 0);
+ /* Since the rules of HW MLD address translation are not fully
+ * compatible with 802.11 EAPOL frame, we do the translation by
+ * software
+ */
+ if (tx_info->skb->protocol == cpu_to_be16(ETH_P_PAE) && sta->mlo) {
+ struct ieee80211_hdr *hdr = (void *)tx_info->skb->data;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+
+ link_conf = rcu_dereference(vif->link_conf[wcid->link_id]);
+ if (!link_conf)
+ return -EINVAL;
- txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
- for (i = 0; i < nbuf; i++) {
- txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
- txp->fw.len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ link_sta = rcu_dereference(sta->link[wcid->link_id]);
+ if (!link_sta)
+ return -EINVAL;
+
+ dma_sync_single_for_cpu(mdev->dma_dev, tx_info->buf[1].addr,
+ tx_info->buf[1].len, DMA_TO_DEVICE);
+
+ memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
+ memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
+ if (ieee80211_has_a4(hdr->frame_control)) {
+ memcpy(hdr->addr3, sta->addr, ETH_ALEN);
+ memcpy(hdr->addr4, vif->addr, ETH_ALEN);
+ } else if (ieee80211_has_tods(hdr->frame_control)) {
+ memcpy(hdr->addr3, sta->addr, ETH_ALEN);
+ } else if (ieee80211_has_fromds(hdr->frame_control)) {
+ memcpy(hdr->addr3, vif->addr, ETH_ALEN);
+ }
+
+ dma_sync_single_for_device(mdev->dma_dev, tx_info->buf[1].addr,
+ tx_info->buf[1].len, DMA_TO_DEVICE);
}
- txp->fw.nbuf = nbuf;
- txp->fw.flags =
- cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+ memset(txwi_ptr, 0, MT_TXD_SIZE);
+ /* Transmit non qos data by 802.11 header and need to fill txd by host*/
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
+ pid, qid, 0);
+
+ /* MT7996 and MT7992 require driver to provide the MAC TXP for AddBA
+ * req
+ */
+ if (le32_to_cpu(ptr[7]) & MT_TXD7_MAC_TXD) {
+ u32 val;
+
+ ptr = (__le32 *)(txwi + MT_TXD_SIZE);
+ memset((void *)ptr, 0, sizeof(struct mt76_connac_fw_txp));
+
+ val = FIELD_PREP(MT_TXP0_TOKEN_ID0, id) |
+ MT_TXP0_TOKEN_ID0_VALID_MASK;
+ ptr[0] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXP1_TID_ADDBA,
+ tx_info->skb->priority &
+ IEEE80211_QOS_CTL_TID_MASK);
+ ptr[1] = cpu_to_le32(val);
+ ptr[2] = cpu_to_le32(tx_info->buf[1].addr & 0xFFFFFFFF);
+
+ val = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[1].len) |
+ MT_TXP3_ML0_MASK;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ val |= FIELD_PREP(MT_TXP3_DMA_ADDR_H,
+ tx_info->buf[1].addr >> 32);
+#endif
+ ptr[3] = cpu_to_le32(val);
+ } else {
+ struct mt76_connac_txp_common *txp;
+
+ txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
+ for (i = 0; i < nbuf; i++) {
+ u16 len;
+
+ len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
+ tx_info->buf[i + 1].addr >> 32);
+#endif
+
+ txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->fw.len[i] = cpu_to_le16(len);
+ }
+ txp->fw.nbuf = nbuf;
+
+ txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
+
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
- if (!key)
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+ if (!key)
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
- if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+ if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
- if (vif) {
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ if (mvif) {
+ if (wcid->offchannel)
+ mlink = rcu_dereference(mvif->mt76.offchannel_link);
+ if (!mlink)
+ mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);
+
+ txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
+ }
- txp->fw.bss_idx = mvif->mt76.idx;
+ txp->fw.token = cpu_to_le16(id);
+ txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
}
- txp->fw.token = cpu_to_le16(id);
- if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
- txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
- else
- txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
- tx_info->skb = DMA_DUMMY_DATA;
+ tx_info->skb = NULL;
/* pass partial skb header to fw */
tx_info->buf[1].len = MT_CT_PARSE_LEN;
@@ -1172,37 +1212,73 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
+u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
+{
+ struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
+ __le32 *txwi = ptr;
+ u32 val;
+
+ memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
+ txwi[0] = cpu_to_le32(val);
+
+ val = BIT(31) |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
+ txwi[1] = cpu_to_le32(val);
+
+ txp->token = cpu_to_le16(token_id);
+ txp->nbuf = 1;
+ txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
+
+ return MT_TXD_SIZE + sizeof(*txp);
+}
+
static void
-mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+mt7996_tx_check_aggr(struct ieee80211_link_sta *link_sta,
+ struct mt76_wcid *wcid, struct sk_buff *skb)
{
- struct mt7996_sta *msta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
u16 fc, tid;
- u32 val;
- if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
+ if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
return;
- tid = le32_get_bits(txwi[1], MT_TXD1_TID);
+ tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
if (tid >= 6) /* skip VO queue */
return;
- val = le32_to_cpu(txwi[2]);
- fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
- FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
+ if (is_8023) {
+ fc = IEEE80211_FTYPE_DATA |
+ (link_sta->sta->wme ? IEEE80211_STYPE_QOS_DATA
+ : IEEE80211_STYPE_DATA);
+ } else {
+ /* No need to get precise TID for Action/Management Frame,
+ * since it will not meet the following Frame Control
+ * condition anyway.
+ */
+
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ fc = le16_to_cpu(hdr->frame_control) &
+ (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
+ }
+
if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
return;
- msta = (struct mt7996_sta *)sta->drv_priv;
- if (!test_and_set_bit(tid, &msta->ampdu_state))
- ieee80211_start_tx_ba_session(sta, tid, 0);
+ if (!test_and_set_bit(tid, &wcid->ampdu_state))
+ ieee80211_start_tx_ba_session(link_sta->sta, tid, 0);
}
static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, struct list_head *free_list)
+ struct ieee80211_link_sta *link_sta,
+ struct mt76_wcid *wcid, struct list_head *free_list)
{
struct mt76_dev *mdev = &dev->mt76;
- struct mt76_wcid *wcid;
__le32 *txwi;
u16 wcid_idx;
@@ -1211,14 +1287,18 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
goto out;
txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
- if (sta) {
- wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (link_sta) {
wcid_idx = wcid->idx;
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
+ struct mt7996_sta *msta;
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7996_tx_check_aggr(sta, txwi);
+ /* AMPDU state is stored in the primary link */
+ msta = (void *)link_sta->sta->drv_priv;
+ mt7996_tx_check_aggr(link_sta, &msta->deflink.wcid,
+ t->skb);
+ }
} else {
- wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
}
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
@@ -1235,13 +1315,15 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
struct mt76_dev *mdev = &dev->mt76;
struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
+ struct ieee80211_link_sta *link_sta = NULL;
struct mt76_txwi_cache *txwi;
- struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
LIST_HEAD(free_list);
struct sk_buff *skb, *tmp;
void *end = data + len;
bool wake = false;
u16 total, count = 0;
+ u8 ver;
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1255,7 +1337,8 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
}
- if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
+ ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
+ if (WARN_ON_ONCE(ver < 5))
return;
total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
@@ -1270,26 +1353,60 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
*/
info = le32_to_cpu(*cur_info);
if (info & MT_TXFREE_INFO_PAIR) {
+ struct ieee80211_sta *sta;
+ unsigned long valid_links;
struct mt7996_sta *msta;
- struct mt76_wcid *wcid;
+ unsigned int id;
u16 idx;
idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
- wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ wcid = mt76_wcid_ptr(dev, idx);
sta = wcid_to_sta(wcid);
- if (!sta)
- continue;
+ if (!sta) {
+ link_sta = NULL;
+ goto next;
+ }
+
+ link_sta = rcu_dereference(sta->link[wcid->link_id]);
+ if (!link_sta)
+ goto next;
+
+ msta = (struct mt7996_sta *)sta->drv_priv;
+ valid_links = sta->valid_links ?: BIT(0);
+
+ /* For MLD STA, add all link's wcid to sta_poll_list */
+ for_each_set_bit(id, &valid_links,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt7996_sta_link *msta_link;
- msta = container_of(wcid, struct mt7996_sta, wcid);
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
+ msta_link = rcu_dereference(msta->link[id]);
+ if (!msta_link)
+ continue;
+
+ mt76_wcid_add_poll(&dev->mt76,
+ &msta_link->wcid);
+ }
+next:
+ /* ver 7 has a new DW with pair = 1, skip it */
+ if (ver == 7 && ((void *)(cur_info + 1) < end) &&
+ (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
+ cur_info++;
continue;
- }
+ } else if (info & MT_TXFREE_INFO_HEADER) {
+ u32 tx_retries = 0, tx_failed = 0;
+
+ if (!wcid)
+ continue;
- if (info & MT_TXFREE_INFO_HEADER)
+ tx_retries =
+ FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
+ tx_failed = tx_retries +
+ !!FIELD_GET(MT_TXFREE_INFO_STAT, info);
+
+ wcid->stats.tx_retries += tx_retries;
+ wcid->stats.tx_failed += tx_failed;
continue;
+ }
for (i = 0; i < 2; i++) {
msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
@@ -1301,7 +1418,8 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
if (!txwi)
continue;
- mt7996_txwi_free(dev, txwi, sta, &free_list);
+ mt7996_txwi_free(dev, txwi, link_sta, wcid,
+ &free_list);
}
}
@@ -1319,35 +1437,48 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
}
static bool
-mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
- __le32 *txs_data, struct mt76_sta_stats *stats)
+mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
+ int pid, __le32 *txs_data)
{
+ struct mt76_sta_stats *stats = &wcid->stats;
struct ieee80211_supported_band *sband;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_phy *mphy;
struct ieee80211_tx_info *info;
struct sk_buff_head list;
struct rate_info rate = {};
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
bool cck = false;
u32 txrate, txs, mode, stbc;
+ txs = le32_to_cpu(txs_data[0]);
+
mt76_tx_status_lock(mdev, &list);
- skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
- if (!skb)
- goto out_no_skb;
- txs = le32_to_cpu(txs_data[0]);
+ /* only report MPDU TXS */
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
+ skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+ if (skb) {
+ info = IEEE80211_SKB_CB(skb);
+ if (!(txs & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
- info = IEEE80211_SKB_CB(skb);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len =
+ !!(info->flags & IEEE80211_TX_STAT_ACK);
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len = !!(info->flags &
- IEEE80211_TX_STAT_ACK);
+ info->status.rates[0].idx = -1;
+ }
+ }
- info->status.rates[0].idx = -1;
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
+ struct ieee80211_sta *sta;
+ u8 tid;
+
+ sta = wcid_to_sta(wcid);
+ tid = FIELD_GET(MT_TXS0_TID, txs);
+ ieee80211_refresh_tx_agg_session_timer(sta, tid);
+ }
txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
@@ -1395,6 +1526,8 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
goto out;
rate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
+ rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case MT_PHY_TYPE_HE_SU:
case MT_PHY_TYPE_HE_EXT_SU:
@@ -1447,9 +1580,8 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
wcid->rate = rate;
out:
- mt76_tx_status_skb_done(mdev, skb, &list);
-
-out_no_skb:
+ if (skb)
+ mt76_tx_status_skb_done(mdev, skb, &list);
mt76_tx_status_unlock(mdev, &list);
return !!skb;
@@ -1457,41 +1589,31 @@ out_no_skb:
static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
- struct mt7996_sta *msta = NULL;
+ struct mt7996_sta_link *msta_link;
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
u8 pid;
- if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
- return;
-
wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
- if (pid < MT_PACKET_ID_FIRST)
- return;
-
- if (wcidx >= mt7996_wtbl_size(dev))
+ if (pid < MT_PACKET_ID_NO_SKB)
return;
rcu_read_lock();
- wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ wcid = mt76_wcid_ptr(dev, wcidx);
if (!wcid)
goto out;
- msta = container_of(wcid, struct mt7996_sta, wcid);
-
- mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats);
+ mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
+ msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
+ mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
out:
rcu_read_unlock();
@@ -1518,7 +1640,7 @@ bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
mt7996_mac_tx_free(dev, data, len);
return false;
case PKT_TYPE_TXS:
- for (rxd += 4; rxd + 8 <= end; rxd += 8)
+ for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
mt7996_mac_add_txs(dev, rxd);
return false;
case PKT_TYPE_RX_FW_MONITOR:
@@ -1548,6 +1670,12 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
+ q == MT_RXQ_TXFREE_BAND2) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
mt7996_mac_tx_free(dev, skb->data, skb->len);
napi_consume_skb(skb, 1);
break;
@@ -1555,7 +1683,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt7996_mcu_rx_event(dev, skb);
break;
case PKT_TYPE_TXS:
- for (rxd += 4; rxd + 8 <= end; rxd += 8)
+ for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
mt7996_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
@@ -1564,7 +1692,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
dev_kfree_skb(skb);
break;
case PKT_TYPE_NORMAL:
- if (!mt7996_mac_fill_rx(dev, skb)) {
+ if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
mt76_rx(&dev->mt76, q, skb);
return;
}
@@ -1575,6 +1703,362 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
+static struct mt7996_msdu_page *
+mt7996_msdu_page_get_from_cache(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p = NULL;
+
+ spin_lock(&dev->wed_rro.lock);
+
+ if (!list_empty(&dev->wed_rro.page_cache)) {
+ p = list_first_entry(&dev->wed_rro.page_cache,
+ struct mt7996_msdu_page, list);
+ list_del(&p->list);
+ }
+
+ spin_unlock(&dev->wed_rro.lock);
+
+ return p;
+}
+
+static struct mt7996_msdu_page *mt7996_msdu_page_get(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p;
+
+ p = mt7996_msdu_page_get_from_cache(dev);
+ if (!p) {
+ p = kzalloc(L1_CACHE_ALIGN(sizeof(*p)), GFP_ATOMIC);
+ if (p)
+ INIT_LIST_HEAD(&p->list);
+ }
+
+ return p;
+}
+
+static void mt7996_msdu_page_put_to_cache(struct mt7996_dev *dev,
+ struct mt7996_msdu_page *p)
+{
+ if (p->buf) {
+ mt76_put_page_pool_buf(p->buf, false);
+ p->buf = NULL;
+ }
+
+ spin_lock(&dev->wed_rro.lock);
+ list_add(&p->list, &dev->wed_rro.page_cache);
+ spin_unlock(&dev->wed_rro.lock);
+}
+
+static void mt7996_msdu_page_free_cache(struct mt7996_dev *dev)
+{
+ while (true) {
+ struct mt7996_msdu_page *p;
+
+ p = mt7996_msdu_page_get_from_cache(dev);
+ if (!p)
+ break;
+
+ if (p->buf)
+ mt76_put_page_pool_buf(p->buf, false);
+
+ kfree(p);
+ }
+}
+
+static u32 mt7996_msdu_page_hash_from_addr(dma_addr_t dma_addr)
+{
+ u32 val = 0;
+ int i = 0;
+
+ while (dma_addr) {
+ val += (u32)((dma_addr & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE;
+ dma_addr >>= 8;
+ i += 13;
+ }
+
+ return val % MT7996_RRO_MSDU_PG_HASH_SIZE;
+}
+
+static struct mt7996_msdu_page *
+mt7996_rro_msdu_page_get(struct mt7996_dev *dev, dma_addr_t dma_addr)
+{
+ u32 hash = mt7996_msdu_page_hash_from_addr(dma_addr);
+ struct mt7996_msdu_page *p, *tmp, *addr = NULL;
+
+ spin_lock(&dev->wed_rro.lock);
+
+ list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[hash],
+ list) {
+ if (p->dma_addr == dma_addr) {
+ list_del(&p->list);
+ addr = p;
+ break;
+ }
+ }
+
+ spin_unlock(&dev->wed_rro.lock);
+
+ return addr;
+}
+
+static void mt7996_rx_token_put(struct mt7996_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+ t = mt76_rx_token_release(&dev->mt76, i);
+ if (!t || !t->ptr)
+ continue;
+
+ mt76_put_page_pool_buf(t->ptr, false);
+ t->dma_addr = 0;
+ t->ptr = NULL;
+
+ mt76_put_rxwi(&dev->mt76, t);
+ }
+}
+
+void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p, *tmp;
+ int i;
+
+ local_bh_disable();
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++) {
+ list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[i],
+ list) {
+ list_del_init(&p->list);
+ if (p->buf)
+ mt76_put_page_pool_buf(p->buf, false);
+ kfree(p);
+ }
+ }
+ mt7996_msdu_page_free_cache(dev);
+
+ local_bh_enable();
+
+ mt7996_rx_token_put(dev);
+}
+
+int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
+ dma_addr_t dma_addr, void *data)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt7996_msdu_page_info *pinfo = data;
+ struct mt7996_msdu_page *p;
+ u32 hash;
+
+ pinfo->data |= cpu_to_le32(FIELD_PREP(MSDU_PAGE_INFO_OWNER_MASK, 1));
+ p = mt7996_msdu_page_get(dev);
+ if (!p)
+ return -ENOMEM;
+
+ p->buf = data;
+ p->dma_addr = dma_addr;
+ p->q = q;
+
+ hash = mt7996_msdu_page_hash_from_addr(dma_addr);
+
+ spin_lock(&dev->wed_rro.lock);
+ list_add_tail(&p->list, &dev->wed_rro.page_map[hash]);
+ spin_unlock(&dev->wed_rro.lock);
+
+ return 0;
+}
+
+static struct mt7996_wed_rro_addr *
+mt7996_rro_addr_elem_get(struct mt7996_dev *dev, u16 session_id, u16 seq_num)
+{
+ u32 idx = 0;
+ void *addr;
+
+ if (session_id == MT7996_RRO_MAX_SESSION) {
+ addr = dev->wed_rro.session.ptr;
+ } else {
+ idx = session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE;
+ addr = dev->wed_rro.addr_elem[idx].ptr;
+
+ idx = session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE;
+ idx = idx * MT7996_RRO_WINDOW_MAX_LEN;
+ }
+ idx += seq_num % MT7996_RRO_WINDOW_MAX_LEN;
+
+ return addr + idx * sizeof(struct mt7996_wed_rro_addr);
+}
+
+#define MT996_RRO_SN_MASK GENMASK(11, 0)
+
+void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
+ u32 cmd_data0 = le32_to_cpu(cmd->data0);
+ u32 cmd_data1 = le32_to_cpu(cmd->data1);
+ u8 ind_reason = FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, cmd_data0);
+ u16 start_seq = FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, cmd_data0);
+ u16 seq_id = FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, cmd_data0);
+ u16 ind_count = FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, cmd_data1);
+ struct mt7996_msdu_page_info *pinfo = NULL;
+ struct mt7996_msdu_page *p = NULL;
+ int i, seq_num = 0;
+
+ for (i = 0; i < ind_count; i++) {
+ struct mt7996_wed_rro_addr *e;
+ struct mt76_rx_status *status;
+ struct mt7996_rro_hif *rxd;
+ int j, len, qid, data_len;
+ struct mt76_txwi_cache *t;
+ dma_addr_t dma_addr = 0;
+ u16 rx_token_id, count;
+ struct mt76_queue *q;
+ struct sk_buff *skb;
+ u32 info = 0, data;
+ u8 signature;
+ void *buf;
+ bool ls;
+
+ seq_num = FIELD_GET(MT996_RRO_SN_MASK, start_seq + i);
+ e = mt7996_rro_addr_elem_get(dev, seq_id, seq_num);
+ data = le32_to_cpu(e->data);
+ signature = FIELD_GET(WED_RRO_ADDR_SIGNATURE_MASK, data);
+ if (signature != (seq_num / MT7996_RRO_WINDOW_MAX_LEN)) {
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK,
+ 0xff);
+
+ e->data |= cpu_to_le32(val);
+ goto update_ack_seq_num;
+ }
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dma_addr = FIELD_GET(WED_RRO_ADDR_HEAD_HIGH_MASK, data);
+ dma_addr <<= 32;
+#endif
+ dma_addr |= le32_to_cpu(e->head_low);
+
+ count = FIELD_GET(WED_RRO_ADDR_COUNT_MASK, data);
+ for (j = 0; j < count; j++) {
+ if (!p) {
+ p = mt7996_rro_msdu_page_get(dev, dma_addr);
+ if (!p)
+ continue;
+
+ dma_sync_single_for_cpu(mdev->dma_dev, p->dma_addr,
+ SKB_WITH_OVERHEAD(p->q->buf_size),
+ page_pool_get_dma_dir(p->q->page_pool));
+ pinfo = (struct mt7996_msdu_page_info *)p->buf;
+ }
+
+ rxd = &pinfo->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
+ len = FIELD_GET(RRO_HIF_DATA1_SDL_MASK,
+ le32_to_cpu(rxd->data1));
+
+ rx_token_id = FIELD_GET(RRO_HIF_DATA4_RX_TOKEN_ID_MASK,
+ le32_to_cpu(rxd->data4));
+ t = mt76_rx_token_release(mdev, rx_token_id);
+ if (!t)
+ goto next_page;
+
+ qid = t->qid;
+ buf = t->ptr;
+ q = &mdev->q_rx[qid];
+ dma_sync_single_for_cpu(mdev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
+
+ t->dma_addr = 0;
+ t->ptr = NULL;
+ mt76_put_rxwi(mdev, t);
+ if (!buf)
+ goto next_page;
+
+ if (q->rx_head)
+ data_len = q->buf_size;
+ else
+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ if (data_len < len + q->buf_offset) {
+ dev_kfree_skb(q->rx_head);
+ mt76_put_page_pool_buf(buf, false);
+ q->rx_head = NULL;
+ goto next_page;
+ }
+
+ ls = FIELD_GET(RRO_HIF_DATA1_LS_MASK,
+ le32_to_cpu(rxd->data1));
+ if (q->rx_head) {
+ /* TODO: Take into account non-linear skb. */
+ mt76_put_page_pool_buf(buf, false);
+ if (ls) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+ }
+ goto next_page;
+ }
+
+ if (ls && !mt7996_rx_check(mdev, buf, len))
+ goto next_page;
+
+ skb = build_skb(buf, q->buf_size);
+ if (!skb)
+ goto next_page;
+
+ skb_reserve(skb, q->buf_offset);
+ skb_mark_for_recycle(skb);
+ __skb_put(skb, len);
+
+ if (ind_reason == 1 || ind_reason == 2) {
+ dev_kfree_skb(skb);
+ goto next_page;
+ }
+
+ if (!ls) {
+ q->rx_head = skb;
+ goto next_page;
+ }
+
+ status = (struct mt76_rx_status *)skb->cb;
+ if (seq_id != MT7996_RRO_MAX_SESSION)
+ status->aggr = true;
+
+ mt7996_queue_rx_skb(mdev, qid, skb, &info);
+next_page:
+ if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) {
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dma_addr =
+ FIELD_GET(MSDU_PAGE_INFO_PG_HIGH_MASK,
+ le32_to_cpu(pinfo->data));
+ dma_addr <<= 32;
+ dma_addr |= le32_to_cpu(pinfo->pg_low);
+#else
+ dma_addr = le32_to_cpu(pinfo->pg_low);
+#endif
+ mt7996_msdu_page_put_to_cache(dev, p);
+ p = NULL;
+ }
+ }
+
+update_ack_seq_num:
+ if ((i + 1) % 4 == 0)
+ mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK,
+ seq_id) |
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK,
+ seq_num));
+ if (p) {
+ mt7996_msdu_page_put_to_cache(dev, p);
+ p = NULL;
+ }
+ }
+
+ /* Update ack_seq_num for remaining addr_elem */
+ if (i % 4)
+ mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, seq_id) |
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, seq_num));
+}
+
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
@@ -1604,20 +2088,19 @@ void mt7996_mac_reset_counters(struct mt7996_phy *phy)
mt7996_mcu_get_chan_mib_info(phy, true);
}
-void mt7996_mac_set_timing(struct mt7996_phy *phy)
+void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
s16 coverage_class = phy->coverage_class;
struct mt7996_dev *dev = phy->dev;
struct mt7996_phy *phy2 = mt7996_phy2(dev);
struct mt7996_phy *phy3 = mt7996_phy3(dev);
- u32 val, reg_offset;
+ u32 reg_offset;
u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
u8 band_idx = phy->mt76->band_idx;
int offset;
- bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
@@ -1630,34 +2113,12 @@ void mt7996_mac_set_timing(struct mt7996_phy *phy)
coverage_class = max_t(s16, coverage_class,
phy3->coverage_class);
- mt76_set(dev, MT_ARB_SCR(band_idx),
- MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
- udelay(1);
-
offset = 3 * coverage_class;
reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
- mt76_wr(dev, MT_TMAC_ICR0(band_idx),
- FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
- FIELD_PREP(MT_IFS_RIFS, 2) |
- FIELD_PREP(MT_IFS_SIFS, 10) |
- FIELD_PREP(MT_IFS_SLOT, phy->slottime));
-
- if (!a_band)
- mt76_wr(dev, MT_TMAC_ICR1(band_idx),
- FIELD_PREP(MT_IFS_EIFS_CCK, 314));
-
- if (phy->slottime < 20 || a_band)
- val = MT7996_CFEND_RATE_DEFAULT;
- else
- val = MT7996_CFEND_RATE_11B;
-
- mt76_rmw_field(dev, MT_RATE_HRCR0(band_idx), MT_RATE_HRCR0_CFEND_RATE, val);
- mt76_clear(dev, MT_ARB_SCR(band_idx),
- MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
@@ -1693,7 +2154,7 @@ mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
void mt7996_update_channel(struct mt76_phy *mphy)
{
- struct mt7996_phy *phy = (struct mt7996_phy *)mphy->priv;
+ struct mt7996_phy *phy = mphy->priv;
struct mt76_channel_state *state = mphy->chan_state;
int nf;
@@ -1724,17 +2185,38 @@ mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- struct ieee80211_hw *hw = priv;
+ struct ieee80211_bss_conf *link_conf;
+ struct mt7996_phy *phy = priv;
+ struct mt7996_dev *dev = phy->dev;
+ unsigned int link_id;
+
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
- mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
break;
default:
- break;
+ return;
}
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct mt7996_vif_link *link;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link || link->phy != phy)
+ continue;
+
+ mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf,
+ link_conf->enable_beacon);
+ }
+}
+
+void mt7996_mac_update_beacons(struct mt7996_phy *phy)
+{
+ ieee80211_iterate_active_interfaces(phy->mt76->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7996_update_vif_beacon, phy);
}
static void
@@ -1742,25 +2224,15 @@ mt7996_update_beacons(struct mt7996_dev *dev)
{
struct mt76_phy *phy2, *phy3;
- ieee80211_iterate_active_interfaces(dev->mt76.hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7996_update_vif_beacon, dev->mt76.hw);
+ mt7996_mac_update_beacons(&dev->phy);
phy2 = dev->mt76.phys[MT_BAND1];
- if (!phy2)
- return;
-
- ieee80211_iterate_active_interfaces(phy2->hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7996_update_vif_beacon, phy2->hw);
+ if (phy2)
+ mt7996_mac_update_beacons(phy2->priv);
phy3 = dev->mt76.phys[MT_BAND2];
- if (!phy3)
- return;
-
- ieee80211_iterate_active_interfaces(phy3->hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7996_update_vif_beacon, phy3->hw);
+ if (phy3)
+ mt7996_mac_update_beacons(phy3->priv);
}
void mt7996_tx_token_put(struct mt7996_dev *dev)
@@ -1770,7 +2242,7 @@ void mt7996_tx_token_put(struct mt7996_dev *dev)
spin_lock_bh(&dev->mt76.token_lock);
idr_for_each_entry(&dev->mt76.token, txwi, id) {
- mt7996_txwi_free(dev, txwi, NULL, NULL);
+ mt7996_txwi_free(dev, txwi, NULL, NULL, NULL);
dev->mt76.token_count--;
}
spin_unlock_bh(&dev->mt76.token_lock);
@@ -1780,13 +2252,10 @@ void mt7996_tx_token_put(struct mt7996_dev *dev)
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
- struct mt7996_phy *phy2, *phy3;
struct mt76_dev *mdev = &dev->mt76;
+ struct mt7996_phy *phy;
int i, ret;
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
-
if (dev->hif2) {
mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
@@ -1798,28 +2267,22 @@ mt7996_mac_restart(struct mt7996_dev *dev)
mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
}
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt7996_for_each_phy(dev, phy)
+ set_bit(MT76_RESET, &phy->mt76->state);
wake_up(&dev->mt76.mcu.wait);
- if (phy2) {
- set_bit(MT76_RESET, &phy2->mt76->state);
- set_bit(MT76_MCU_RESET, &phy2->mt76->state);
- }
- if (phy3) {
- set_bit(MT76_RESET, &phy3->mt76->state);
- set_bit(MT76_MCU_RESET, &phy3->mt76->state);
- }
/* lock/unlock all queues to ensure that no tx is pending */
- mt76_txq_schedule_all(&dev->mphy);
- if (phy2)
- mt76_txq_schedule_all(phy2->mt76);
- if (phy3)
- mt76_txq_schedule_all(phy3->mt76);
+ mt7996_for_each_phy(dev, phy)
+ mt76_txq_schedule_all(phy->mt76);
/* disable all tx/rx napi */
mt76_worker_disable(&dev->mt76.tx_worker);
mt76_for_each_q_rx(mdev, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt76_queue_is_wed_rro(&mdev->q_rx[i]))
+ continue;
+
if (mdev->q_rx[i].ndesc)
napi_disable(&dev->mt76.napi[i]);
}
@@ -1831,14 +2294,18 @@ mt7996_mac_restart(struct mt7996_dev *dev)
mt7996_dma_reset(dev, true);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt76_queue_is_wed_rro(&mdev->q_rx[i]))
+ continue;
+
if (mdev->q_rx[i].ndesc) {
napi_enable(&dev->mt76.napi[i]);
+ local_bh_disable();
napi_schedule(&dev->mt76.napi[i]);
+ local_bh_enable();
}
}
- local_bh_enable();
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
@@ -1859,45 +2326,60 @@ mt7996_mac_restart(struct mt7996_dev *dev)
if (ret)
goto out;
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt7996_has_hwrro(dev)) {
+ u32 wed_irq_mask = dev->mt76.mmio.irqmask |
+ MT_INT_TX_DONE_BAND2;
+
+ mt7996_rro_hw_init(dev);
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+ mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i]))
+ mt76_queue_rx_reset(dev, i);
+ }
+
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+ mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
+ false);
+ mt7996_irq_enable(dev, wed_irq_mask);
+ mt7996_irq_disable(dev, 0);
+ }
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
+ mt76_wr(dev, MT_INT_PCIE1_MASK_CSR,
+ MT_INT_TX_RX_DONE_EXT);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
+ MT_INT_TX_RX_DONE_EXT);
+ }
+
/* set the necessary init items */
ret = mt7996_mcu_set_eeprom(dev);
if (ret)
goto out;
mt7996_mac_init(dev);
- mt7996_init_txpower(dev, &dev->mphy.sband_2g.sband);
- mt7996_init_txpower(dev, &dev->mphy.sband_5g.sband);
- mt7996_init_txpower(dev, &dev->mphy.sband_6g.sband);
+ mt7996_for_each_phy(dev, phy)
+ mt7996_init_txpower(phy);
ret = mt7996_txbf_init(dev);
+ if (ret)
+ goto out;
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
- ret = mt7996_run(dev->mphy.hw);
- if (ret)
- goto out;
- }
-
- if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
- ret = mt7996_run(phy2->mt76->hw);
- if (ret)
- goto out;
- }
+ mt7996_for_each_phy(dev, phy) {
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ continue;
- if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
- ret = mt7996_run(phy3->mt76->hw);
+ ret = mt7996_run(phy);
if (ret)
goto out;
}
out:
/* reset done */
- clear_bit(MT76_RESET, &dev->mphy.state);
- if (phy2)
- clear_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- clear_bit(MT76_RESET, &phy3->mt76->state);
+ mt7996_for_each_phy(dev, phy)
+ clear_bit(MT76_RESET, &phy->mt76->state);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
@@ -1906,73 +2388,123 @@ out:
}
static void
+mt7996_mac_reset_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(msta->link); i++) {
+ struct mt7996_sta_link *msta_link = NULL;
+
+ msta_link = rcu_replace_pointer(msta->link[i], msta_link,
+ lockdep_is_held(&dev->mt76.mutex));
+ if (!msta_link)
+ continue;
+
+ mt7996_mac_sta_deinit_link(dev, msta_link);
+
+ if (msta->deflink_id == i) {
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ continue;
+ }
+
+ kfree_rcu(msta_link, rcu_head);
+ }
+}
+
+static void
+mt7996_mac_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ struct mt7996_dev *dev = data;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(mvif->link); i++) {
+ mlink = mt76_dereference(mvif->link[i], &dev->mt76);
+ if (!mlink || mlink == (struct mt76_vif_link *)vif->drv_priv)
+ continue;
+
+ rcu_assign_pointer(mvif->link[i], NULL);
+ kfree_rcu(mlink, rcu_head);
+ }
+ rcu_read_unlock();
+}
+
+static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
- struct mt7996_phy *phy2, *phy3;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7996_phy *phy;
+ LIST_HEAD(list);
int i;
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
dev->recovery.hw_full_reset = true;
wake_up(&dev->mt76.mcu.wait);
- ieee80211_stop_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_stop_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_stop_queues(phy3->mt76->hw);
+ ieee80211_stop_queues(hw);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2)
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- if (phy3)
- cancel_delayed_work_sync(&phy3->mt76->mac_work);
+ cancel_work_sync(&dev->wed_rro.work);
+ mt7996_for_each_phy(dev, phy)
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
+
+ mt76_abort_scan(&dev->mt76);
mutex_lock(&dev->mt76.mutex);
for (i = 0; i < 10; i++) {
if (!mt7996_mac_restart(dev))
break;
}
- mutex_unlock(&dev->mt76.mutex);
if (i == 10)
dev_err(dev->mt76.dev, "chip full reset failed\n");
- ieee80211_restart_hw(mt76_hw(dev));
- if (phy2)
- ieee80211_restart_hw(phy2->mt76->hw);
- if (phy3)
- ieee80211_restart_hw(phy3->mt76->hw);
+ mt7996_for_each_phy(dev, phy)
+ phy->omac_mask = 0;
- ieee80211_wake_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_wake_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_wake_queues(phy3->mt76->hw);
+ ieee80211_iterate_stations_atomic(hw, mt7996_mac_reset_sta_iter, dev);
+ ieee80211_iterate_active_interfaces_atomic(hw,
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
+ mt7996_mac_reset_vif_iter, dev);
+ mt76_reset_device(&dev->mt76);
+
+ INIT_LIST_HEAD(&dev->sta_rc_list);
+ INIT_LIST_HEAD(&dev->twt_list);
+
+ spin_lock_bh(&dev->wed_rro.lock);
+ list_splice_init(&dev->wed_rro.poll_list, &list);
+ spin_unlock_bh(&dev->wed_rro.lock);
+
+ while (!list_empty(&list)) {
+ struct mt7996_wed_rro_session_id *e;
+
+ e = list_first_entry(&list, struct mt7996_wed_rro_session_id,
+ list);
+ list_del_init(&e->list);
+ kfree(e);
+ }
+
+ i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
+ dev->mt76.global_wcid.idx = i;
dev->recovery.hw_full_reset = false;
- ieee80211_queue_delayed_work(mt76_hw(dev),
- &dev->mphy.mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(phy2->mt76->hw,
- &phy2->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy3)
- ieee80211_queue_delayed_work(phy3->mt76->hw,
- &phy3->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ ieee80211_restart_hw(mt76_hw(dev));
}
void mt7996_mac_reset_work(struct work_struct *work)
{
- struct mt7996_phy *phy2, *phy3;
+ struct ieee80211_hw *hw;
struct mt7996_dev *dev;
+ struct mt7996_phy *phy;
int i;
dev = container_of(work, struct mt7996_dev, reset_work);
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
+ hw = mt76_hw(dev);
/* chip full reset */
if (dev->recovery.restart) {
@@ -2003,32 +2535,42 @@ void mt7996_mac_reset_work(struct work_struct *work)
return;
dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
- wiphy_name(dev->mt76.hw->wiphy));
+ wiphy_name(hw->wiphy));
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
+ mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mtk_wed_device_stop(&dev->mt76.mmio.wed);
+
ieee80211_stop_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_stop_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_stop_queues(phy3->mt76->hw);
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt76_abort_scan(&dev->mt76);
wake_up(&dev->mt76.mcu.wait);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2) {
- set_bit(MT76_RESET, &phy2->mt76->state);
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- }
- if (phy3) {
- set_bit(MT76_RESET, &phy3->mt76->state);
- cancel_delayed_work_sync(&phy3->mt76->mac_work);
+
+ cancel_work_sync(&dev->wed_rro.work);
+ mt7996_for_each_phy(dev, phy) {
+ mt76_abort_roc(phy->mt76);
+ set_bit(MT76_RESET, &phy->mt76->state);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
}
+
mt76_worker_disable(&dev->mt76.tx_worker);
- mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
+ continue;
+
napi_disable(&dev->mt76.napi[i]);
+ }
napi_disable(&dev->mt76.tx_napi);
mutex_lock(&dev->mt76.mutex);
+ mt7996_npu_hw_stop(dev);
+
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
@@ -2041,51 +2583,65 @@ void mt7996_mac_reset_work(struct work_struct *work)
mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
}
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ /* enable DMA Rx/Tx and interrupt */
+ mt7996_dma_start(dev, false, false);
+
+ if (!is_mt7996(&dev->mt76) && dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK);
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ u32 wed_irq_mask = MT_INT_TX_DONE_BAND2 |
+ dev->mt76.mmio.irqmask;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+ mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
+ true);
+ mt7996_irq_enable(dev, wed_irq_mask);
+ mt7996_irq_disable(dev, 0);
+ }
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
+ mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
+ MT_INT_TX_RX_DONE_EXT);
+ }
+
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
- clear_bit(MT76_RESET, &dev->mphy.state);
- if (phy2)
- clear_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- clear_bit(MT76_RESET, &phy3->mt76->state);
+ mt7996_for_each_phy(dev, phy)
+ clear_bit(MT76_RESET, &phy->mt76->state);
- local_bh_disable();
mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
+ continue;
+
napi_enable(&dev->mt76.napi[i]);
+ local_bh_disable();
napi_schedule(&dev->mt76.napi[i]);
+ local_bh_enable();
}
- local_bh_enable();
tasklet_schedule(&dev->mt76.irq_tasklet);
- mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
- mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
-
mt76_worker_enable(&dev->mt76.tx_worker);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
- ieee80211_wake_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_wake_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_wake_queues(phy3->mt76->hw);
+ ieee80211_wake_queues(hw);
+ mt7996_update_beacons(dev);
mutex_unlock(&dev->mt76.mutex);
- mt7996_update_beacons(dev);
+ mt7996_npu_hw_init(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(phy2->mt76->hw,
- &phy2->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy3)
- ieee80211_queue_delayed_work(phy3->mt76->hw,
- &phy3->mt76->mac_work,
+ mt7996_for_each_phy(dev, phy)
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7996_WATCHDOG_TIME);
dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.",
wiphy_name(dev->mt76.hw->wiphy));
@@ -2186,8 +2742,8 @@ void mt7996_reset(struct mt7996_dev *dev)
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
+ struct mt76_mib_stats *mib = &phy->mib;
struct mt7996_dev *dev = phy->dev;
- struct mib_stats *mib = &phy->mib;
u8 band_idx = phy->mt76->band_idx;
u32 cnt;
int i;
@@ -2328,36 +2884,48 @@ void mt7996_mac_update_stats(struct mt7996_phy *phy)
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
- struct ieee80211_sta *sta;
+ struct mt7996_sta_link *msta_link;
struct ieee80211_vif *vif;
- struct mt7996_sta *msta;
- u32 changed;
+ struct mt7996_vif *mvif;
LIST_HEAD(list);
+ u32 changed;
- spin_lock_bh(&dev->sta_poll_lock);
+ mutex_lock(&dev->mt76.mutex);
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
list_splice_init(&dev->sta_rc_list, &list);
while (!list_empty(&list)) {
- msta = list_first_entry(&list, struct mt7996_sta, rc_list);
- list_del_init(&msta->rc_list);
- changed = msta->changed;
- msta->changed = 0;
- spin_unlock_bh(&dev->sta_poll_lock);
+ msta_link = list_first_entry(&list, struct mt7996_sta_link,
+ rc_list);
+ list_del_init(&msta_link->rc_list);
+
+ changed = msta_link->changed;
+ msta_link->changed = 0;
+ mvif = msta_link->sta->vif;
+ vif = container_of((void *)mvif, struct ieee80211_vif,
+ drv_priv);
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
- vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
IEEE80211_RC_NSS_CHANGED |
IEEE80211_RC_BW_CHANGED))
- mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);
+ mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif,
+ msta_link->wcid.link_id,
+ true);
- /* TODO: smps change */
+ if (changed & IEEE80211_RC_SMPS_CHANGED)
+ mt7996_mcu_set_fixed_field(dev, msta_link->sta, NULL,
+ msta_link->wcid.link_id,
+ RATE_PARAM_MMPS_UPDATE);
- spin_lock_bh(&dev->sta_poll_lock);
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
}
- spin_unlock_bh(&dev->sta_poll_lock);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+ mutex_unlock(&dev->mt76.mutex);
}
void mt7996_mac_work(struct work_struct *work)
@@ -2376,6 +2944,12 @@ void mt7996_mac_work(struct work_struct *work)
mphy->mac_work_count = 0;
mt7996_mac_update_stats(phy);
+
+ mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
+ if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
+ mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
+ }
}
mutex_unlock(&mphy->dev->mutex);
@@ -2389,16 +2963,15 @@ void mt7996_mac_work(struct work_struct *work)
static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
+ int rdd_idx = mt7996_get_rdd_idx(phy, false);
- if (phy->rdd_state & BIT(0))
- mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
- MT_RX_SEL0, 0);
- if (phy->rdd_state & BIT(1))
- mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
- MT_RX_SEL0, 0);
+ if (rdd_idx < 0)
+ return;
+
+ mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
}
-static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
+static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
{
int err, region;
@@ -2415,44 +2988,30 @@ static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
break;
}
- err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
- MT_RX_SEL0, region);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
if (err < 0)
return err;
- return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
- MT_RX_SEL0, 1);
+ return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1);
}
static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7996_dev *dev = phy->dev;
- u8 band_idx = phy->mt76->band_idx;
- int err;
+ int err, rdd_idx;
- /* start CAC */
- err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
- MT_RX_SEL0, 0);
- if (err < 0)
- return err;
+ rdd_idx = mt7996_get_rdd_idx(phy, false);
+ if (rdd_idx < 0)
+ return -EINVAL;
- err = mt7996_dfs_start_rdd(dev, band_idx);
+ /* start CAC */
+ err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
if (err < 0)
return err;
- phy->rdd_state |= BIT(band_idx);
+ err = mt7996_dfs_start_rdd(dev, rdd_idx);
- if (chandef->width == NL80211_CHAN_WIDTH_160 ||
- chandef->width == NL80211_CHAN_WIDTH_80P80) {
- err = mt7996_dfs_start_rdd(dev, 1);
- if (err < 0)
- return err;
-
- phy->rdd_state |= BIT(1);
- }
-
- return 0;
+ return err;
}
static int
@@ -2493,12 +3052,12 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
enum mt76_dfs_state dfs_state, prev_state;
- int err;
+ int err, rdd_idx = mt7996_get_rdd_idx(phy, false);
prev_state = phy->mt76->dfs_state;
dfs_state = mt76_phy_dfs_state(phy->mt76);
- if (prev_state == dfs_state)
+ if (prev_state == dfs_state || rdd_idx < 0)
return 0;
if (prev_state == MT_DFS_STATE_UNKNOWN)
@@ -2522,8 +3081,7 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
if (dfs_state == MT_DFS_STATE_CAC)
return 0;
- err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
if (err < 0) {
phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
return err;
@@ -2533,8 +3091,7 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
return 0;
stop:
- err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
if (err < 0)
return err;
@@ -2618,6 +3175,34 @@ static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
return 0;
}
+static bool
+mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
+ struct ieee80211_twt_params *twt_agrt)
+{
+ u16 type = le16_to_cpu(twt_agrt->req_type);
+ u8 exp;
+ int i;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
+ for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
+ struct mt7996_twt_flow *f;
+
+ if (!(msta_link->twt.flowid_mask & BIT(i)))
+ continue;
+
+ f = &msta_link->twt.flow[i];
+ if (f->duration == twt_agrt->min_twt_dur &&
+ f->mantissa == twt_agrt->mantissa &&
+ f->exp == exp &&
+ f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
+ f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
+ f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
+ return true;
+ }
+
+ return false;
+}
+
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt)
@@ -2625,12 +3210,12 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
u16 req_type = le16_to_cpu(twt_agrt->req_type);
enum ieee80211_twt_setup_cmd sta_setup_cmd;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_twt_flow *flow;
- int flowid, table_id;
- u8 exp;
+ u8 flowid, table_id, exp;
if (mt7996_mac_check_twt_req(twt))
goto out;
@@ -2640,21 +3225,32 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
goto unlock;
- if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
+ if (hweight8(msta_link->twt.flowid_mask) ==
+ ARRAY_SIZE(msta_link->twt.flow))
+ goto unlock;
+
+ if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
+ setup_cmd = TWT_SETUP_CMD_DICTATE;
+ twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
+ goto unlock;
+ }
+
+ if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
goto unlock;
- flowid = ffs(~msta->twt.flowid_mask) - 1;
- le16p_replace_bits(&twt_agrt->req_type, flowid,
- IEEE80211_TWT_REQTYPE_FLOWID);
+ flowid = ffs(~msta_link->twt.flowid_mask) - 1;
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type |= le16_encode_bits(flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
table_id = ffs(~dev->twt.table_mask) - 1;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
- flow = &msta->twt.flow[flowid];
+ flow = &msta_link->twt.flow[flowid];
memset(flow, 0, sizeof(*flow));
INIT_LIST_HEAD(&flow->list);
- flow->wcid = msta->wcid.idx;
+ flow->wcid = msta_link->wcid.idx;
flow->table_id = table_id;
flow->id = flowid;
flow->duration = twt_agrt->min_twt_dur;
@@ -2672,7 +3268,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
flow->sched = true;
flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
- curr_tsf = __mt7996_get_tsf(hw, msta->vif);
+ curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
flow_tsf = curr_tsf + interval - rem;
twt_agrt->twt = cpu_to_le64(flow_tsf);
@@ -2681,44 +3277,45 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
}
flow->tsf = le64_to_cpu(twt_agrt->twt);
- if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
+ if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
+ MCU_TWT_AGRT_ADD))
goto unlock;
setup_cmd = TWT_SETUP_CMD_ACCEPT;
dev->twt.table_mask |= BIT(table_id);
- msta->twt.flowid_mask |= BIT(flowid);
+ msta_link->twt.flowid_mask |= BIT(flowid);
dev->twt.n_agrt++;
unlock:
mutex_unlock(&dev->mt76.mutex);
out:
- le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
- IEEE80211_TWT_REQTYPE_SETUP_CMD);
- twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
- (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type |=
+ le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
- struct mt7996_sta *msta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
u8 flowid)
{
struct mt7996_twt_flow *flow;
lockdep_assert_held(&dev->mt76.mutex);
- if (flowid >= ARRAY_SIZE(msta->twt.flow))
+ if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
return;
- if (!(msta->twt.flowid_mask & BIT(flowid)))
+ if (!(msta_link->twt.flowid_mask & BIT(flowid)))
return;
- flow = &msta->twt.flow[flowid];
- if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
- MCU_TWT_AGRT_DELETE))
+ flow = &msta_link->twt.flow[flowid];
+ if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
return;
list_del_init(&flow->list);
- msta->twt.flowid_mask &= ~BIT(flowid);
+ msta_link->twt.flowid_mask &= ~BIT(flowid);
dev->twt.table_mask &= ~BIT(flow->table_id);
dev->twt.n_agrt--;
}