Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/mt7615/mac.c')
-rw-r--r--   drivers/net/wireless/mediatek/mt76/mt7615/mac.c   470
1 file changed, 225 insertions, 245 deletions
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index ec25e5a95d44..bd56cdb022a2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Ryder Lee <ryder.lee@mediatek.com>
@@ -90,10 +90,7 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
struct mt7615_sta *sta;
struct mt76_wcid *wcid;
- if (idx >= MT7615_WTBL_SIZE)
- return NULL;
-
- wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ wcid = mt76_wcid_ptr(dev, idx);
if (unicast || !wcid)
return wcid;
@@ -107,8 +104,9 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
return &sta->vif->sta.wcid;
}
-void mt7615_mac_reset_counters(struct mt7615_dev *dev)
+void mt7615_mac_reset_counters(struct mt7615_phy *phy)
{
+ struct mt7615_dev *dev = phy->dev;
int i;
for (i = 0; i < 4; i++) {
@@ -116,10 +114,8 @@ void mt7615_mac_reset_counters(struct mt7615_dev *dev)
mt76_rr(dev, MT_TX_AGG_CNT(1, i));
}
- memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
- dev->mt76.phy.survey_time = ktime_get_boottime();
- if (dev->mt76.phy2)
- dev->mt76.phy2->survey_time = ktime_get_boottime();
+ memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
+ phy->mt76->survey_time = ktime_get_boottime();
/* reset airtime counters */
mt76_rr(dev, MT_MIB_SDR9(0));
@@ -253,15 +249,15 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 qos_ctrl, ht_ctrl;
+ u16 frame_control;
- if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) !=
+ if (le32_get_bits(rxd[1], MT_RXD1_NORMAL_ADDR_TYPE) !=
MT_RXD1_NORMAL_U2M)
return -EINVAL;
@@ -275,47 +271,53 @@ static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
- hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]);
- hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]);
- qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]);
- ht_ctrl = FIELD_GET(MT_RXD7_HT_CONTROL, rxd[7]);
-
+ frame_control = le32_get_bits(rxd[4], MT_RXD4_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_SEQ_CTRL));
hdr.duration_id = 0;
+
ether_addr_copy(hdr.addr1, vif->addr);
ether_addr_copy(hdr.addr2, sta->addr);
- switch (le16_to_cpu(hdr.frame_control) &
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
case 0:
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
- if (ieee80211_is_data_qos(hdr.frame_control))
- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[7],
+ IEEE80211_HT_CTL_LEN);
+
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
if (ieee80211_has_a4(hdr.frame_control))
memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
else
@@ -330,14 +332,15 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7615_phy *phy = &dev->phy;
- struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
+ struct mt7615_phy *phy2;
__le32 *rxd = (__le32 *)skb->data;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_status = *(u32 *)skb->cb;
bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
u16 hdr_gap;
int phy_idx;
@@ -349,6 +352,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
memset(status, 0, sizeof(*status));
chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
+
+ phy2 = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
if (!phy2)
phy_idx = 0;
else if (phy2->chfreq == phy->chfreq)
@@ -379,13 +384,11 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt7615_sta *msta;
msta = container_of(status->wcid, struct mt7615_sta, wcid);
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
}
- if ((rxd0 & csum_mask) == csum_mask)
+ if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
@@ -492,9 +495,9 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
}
if (phy_idx == 1 && phy2) {
- mphy = dev->mt76.phy2;
+ mphy = dev->mt76.phys[MT_BAND1];
phy = phy2;
- status->ext_phy = true;
+ status->phy_idx = phy_idx;
}
if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
@@ -570,15 +573,6 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
- status->signal = status->chain_signal[0];
-
- for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
- if (!(status->chains & BIT(i)))
- continue;
-
- status->signal = max(status->signal,
- status->chain_signal[i]);
- }
mt7615_mac_fill_tm_rx(mphy->priv, rxd);
@@ -610,14 +604,14 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
* When header translation failure is indicated,
* the hardware will insert an extra 2-byte field
* containing the data length after the protocol
- * type field.
+ * type field. This happens either when the LLC-SNAP
+ * pattern did not match, or if a VLAN header was
+ * detected.
*/
pad_start = 12;
if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
pad_start += 4;
-
- if (get_unaligned_be16(skb->data + pad_start) !=
- skb->len - pad_start - 2)
+ else
pad_start = 0;
}
@@ -655,11 +649,6 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
return 0;
}
-void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
-{
-}
-EXPORT_SYMBOL_GPL(mt7615_sta_ps);
-
static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
struct mt76_phy *mphy,
@@ -720,13 +709,14 @@ mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
- struct ieee80211_key_conf *key, bool beacon)
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, bool beacon)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
- bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
bool multicast = is_multicast_ether_addr(hdr->addr1);
struct ieee80211_vif *vif = info->control.vif;
bool is_mmio = mt76_is_mmio(&dev->mt76);
@@ -737,7 +727,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
u16 seqno = 0;
if (vif) {
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
omac_idx = mvif->omac_idx;
wmm_idx = mvif->wmm_idx;
@@ -749,18 +739,18 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
tx_count = msta->rate_count;
}
- if (ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (phy_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
if (beacon) {
p_fmt = MT_TX_TYPE_FW;
- q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
- } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
+ q_idx = phy_idx ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
+ } else if (qid >= MT_TXQ_PSD) {
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
- q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
+ q_idx = phy_idx ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
} else {
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
@@ -879,60 +869,6 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi);
-static void
-mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
-{
- int i;
-
- for (i = 0; i < txp->nbuf; i++)
- dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
- le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
-}
-
-static void
-mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
-{
- u32 last_mask;
- int i;
-
- last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST;
-
- for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
- struct mt7615_txp_ptr *ptr = &txp->ptr[i];
- bool last;
- u16 len;
-
- len = le16_to_cpu(ptr->len0);
- last = len & last_mask;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
- DMA_TO_DEVICE);
- if (last)
- break;
-
- len = le16_to_cpu(ptr->len1);
- last = len & last_mask;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
- DMA_TO_DEVICE);
- if (last)
- break;
- }
-}
-
-void mt7615_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *t)
-{
- struct mt7615_txp_common *txp;
-
- txp = mt7615_txwi_to_txp(dev, t);
- if (is_mt7615(dev))
- mt7615_txp_skb_unmap_fw(dev, &txp->fw);
- else
- mt7615_txp_skb_unmap_hw(dev, &txp->hw);
-}
-EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap);
-
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
{
mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
@@ -963,16 +899,19 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
int i;
INIT_LIST_HEAD(&sta_poll_list);
- spin_lock_bh(&dev->sta_poll_lock);
- list_splice_init(&dev->sta_poll_list, &sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
while (!list_empty(&sta_poll_list)) {
bool clear = false;
msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
- poll_list);
- list_del_init(&msta->poll_list);
+ wcid.poll_list);
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ list_del_init(&msta->wcid.poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
@@ -1141,7 +1080,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
offset %= 32;
val = mt76_rr(dev, addr);
- val >>= (tid % 32);
+ val >>= offset;
if (offset > 20) {
addr += 4;
@@ -1228,11 +1167,25 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
+void mt7615_mac_enable_rtscts(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ u32 addr;
+
+ addr = mt7615_mac_wtbl_addr(dev, mvif->sta.wcid.idx) + 3 * 4;
+
+ if (enable)
+ mt76_set(dev, addr, MT_WTBL_W3_RTS);
+ else
+ mt76_clear(dev, addr, MT_WTBL_W3_RTS);
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts);
+
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
- enum mt76_cipher_type cipher, u16 cipher_mask,
- enum set_key_cmd cmd)
+ enum mt76_cipher_type cipher, u16 cipher_mask)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
u8 data[32] = {};
@@ -1241,27 +1194,18 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
return -EINVAL;
mt76_rr_copy(dev, addr, data, sizeof(data));
- if (cmd == SET_KEY) {
- if (cipher == MT_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(data, key->key, 16);
- memcpy(data + 16, key->key + 24, 8);
- memcpy(data + 24, key->key + 16, 8);
- } else {
- if (cipher_mask == BIT(cipher))
- memcpy(data, key->key, key->keylen);
- else if (cipher != MT_CIPHER_BIP_CMAC_128)
- memcpy(data, key->key, 16);
- if (cipher == MT_CIPHER_BIP_CMAC_128)
- memcpy(data + 16, key->key, 16);
- }
+ if (cipher == MT_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(data, key->key, 16);
+ memcpy(data + 16, key->key + 24, 8);
+ memcpy(data + 24, key->key + 16, 8);
} else {
+ if (cipher_mask == BIT(cipher))
+ memcpy(data, key->key, key->keylen);
+ else if (cipher != MT_CIPHER_BIP_CMAC_128)
+ memcpy(data, key->key, 16);
if (cipher == MT_CIPHER_BIP_CMAC_128)
- memset(data + 16, 0, 16);
- else if (cipher_mask)
- memset(data, 0, 16);
- if (!cipher_mask)
- memset(data, 0, sizeof(data));
+ memcpy(data + 16, key->key, 16);
}
mt76_wr_copy(dev, addr, data, sizeof(data));
@@ -1272,7 +1216,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
static int
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt76_cipher_type cipher, u16 cipher_mask,
- int keyidx, enum set_key_cmd cmd)
+ int keyidx)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
@@ -1291,9 +1235,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
else
w0 &= ~MT_WTBL_W0_RX_IK_VALID;
- if (cmd == SET_KEY &&
- (cipher != MT_CIPHER_BIP_CMAC_128 ||
- cipher_mask == BIT(cipher))) {
+ if (cipher != MT_CIPHER_BIP_CMAC_128 || cipher_mask == BIT(cipher)) {
w0 &= ~MT_WTBL_W0_KEY_IDX;
w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
}
@@ -1310,19 +1252,10 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
static void
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
- enum mt76_cipher_type cipher, u16 cipher_mask,
- enum set_key_cmd cmd)
+ enum mt76_cipher_type cipher, u16 cipher_mask)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
- if (!cipher_mask) {
- mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
- return;
- }
-
- if (cmd != SET_KEY)
- return;
-
if (cipher == MT_CIPHER_BIP_CMAC_128 &&
cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
return;
@@ -1333,8 +1266,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
+ struct ieee80211_key_conf *key)
{
enum mt76_cipher_type cipher;
u16 cipher_mask = wcid->cipher;
@@ -1344,19 +1276,14 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
if (cipher == MT_CIPHER_NONE)
return -EOPNOTSUPP;
- if (cmd == SET_KEY)
- cipher_mask |= BIT(cipher);
- else
- cipher_mask &= ~BIT(cipher);
-
- mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
- err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
- cmd);
+ cipher_mask |= BIT(cipher);
+ mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask);
+ err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask);
if (err < 0)
return err;
err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
- key->keyidx, cmd);
+ key->keyidx);
if (err < 0)
return err;
@@ -1367,13 +1294,12 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
+ struct ieee80211_key_conf *key)
{
int err;
spin_lock_bh(&dev->mt76.lock);
- err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+ err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
spin_unlock_bh(&dev->mt76.lock);
return err;
@@ -1430,7 +1356,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
}
rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
- rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
+ rs_idx = !((u32)(le32_get_bits(txs_data[4], MT_TXS4_F0_TIMESTAMP) -
rate_set_tsf) < 1000000);
rs_idx ^= rate_set_tsf & BIT(0);
rs = &sta->rateset[rs_idx];
@@ -1442,8 +1368,8 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
if (sta->rate_probe) {
struct mt7615_phy *phy = &dev->phy;
- if (sta->wcid.ext_phy && dev->mt76.phy2)
- phy = dev->mt76.phy2->priv;
+ if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1])
+ phy = dev->mt76.phys[MT_BAND1]->priv;
mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
}
@@ -1485,8 +1411,8 @@ out:
fallthrough;
case MT_PHY_TYPE_OFDM:
mphy = &dev->mphy;
- if (sta->wcid.ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
@@ -1561,14 +1487,11 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
struct mt76_wcid *wcid;
struct mt76_phy *mphy = &dev->mt76.phy;
__le32 *txs_data = data;
- u32 txs;
u8 wcidx;
u8 pid;
- txs = le32_to_cpu(txs_data[0]);
- pid = FIELD_GET(MT_TXS0_PID, txs);
- txs = le32_to_cpu(txs_data[2]);
- wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+ pid = le32_get_bits(txs_data[0], MT_TXS0_PID);
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
if (pid == MT_PACKET_ID_NO_ACK)
return;
@@ -1578,17 +1501,13 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
rcu_read_lock();
- wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ wcid = mt76_wcid_ptr(dev, wcidx);
if (!wcid)
goto out;
msta = container_of(wcid, struct mt7615_sta, wcid);
sta = wcid_to_sta(wcid);
-
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
+ mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
goto out;
@@ -1596,11 +1515,14 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
if (wcidx >= MT7615_WTBL_STA || !sta)
goto out;
- if (wcid->ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (wcid->phy_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
- if (mt7615_fill_txs(dev, msta, &info, txs_data))
+ if (mt7615_fill_txs(dev, msta, &info, txs_data)) {
+ spin_lock_bh(&dev->mt76.rx_lock);
ieee80211_tx_status_noskb(mphy->hw, sta, &info);
+ spin_unlock_bh(&dev->mt76.rx_lock);
+ }
out:
rcu_read_unlock();
@@ -1614,7 +1536,7 @@ mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi)
u32 val;
u8 wcid;
- mt7615_txp_skb_unmap(mdev, txwi);
+ mt76_connac_txp_skb_unmap(mdev, txwi);
if (!txwi->skb)
goto out;
@@ -1642,9 +1564,11 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
mt7615_txwi_free(dev, txwi);
}
-static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
{
- struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
+ struct mt76_connac_tx_free *free = data;
+ void *tx_token = data + sizeof(*free);
+ void *end = data + len;
u8 i, count;
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1655,21 +1579,25 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
}
- count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
+ count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_ID_CNT);
if (is_mt7615(&dev->mt76)) {
- __le16 *token = &free->token[0];
+ __le16 *token = tx_token;
+
+ if (WARN_ON_ONCE((void *)&token[count] > end))
+ return;
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
} else {
- __le32 *token = (__le32 *)&free->token[0];
+ __le32 *token = tx_token;
+
+ if (WARN_ON_ONCE((void *)&token[count] > end))
+ return;
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
}
- dev_kfree_skb(skb);
-
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
@@ -1677,8 +1605,31 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
mt76_worker_schedule(&dev->mt76.tx_worker);
}
+bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7615_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 7 <= end; rxd += 7)
+ mt7615_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_rx_check);
+
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 *info)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
@@ -1686,8 +1637,8 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
enum rx_pkt_type type;
u16 flag;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
- flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+ flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
type = PKT_TYPE_NORMAL_MCU;
@@ -1698,7 +1649,8 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
dev_kfree_skb(skb);
break;
case PKT_TYPE_TXRX_NOTIFY:
- mt7615_mac_tx_free(dev, skb);
+ mt7615_mac_tx_free(dev, skb->data, skb->len);
+ dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_EVENT:
mt7615_mcu_rx_event(dev, skb);
@@ -1835,7 +1787,7 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
struct mt7615_dev *dev = phy->dev;
int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
bool ext_phy = phy != &dev->phy;
- u16 def_th = ofdm ? -98 : -110;
+ s16 def_th = ofdm ? -98 : -110;
bool update = false;
s8 *sensitivity;
int signal;
@@ -1984,6 +1936,7 @@ mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
static void mt7615_update_survey(struct mt7615_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
ktime_t cur_time;
/* MT7615 can only update both phys simultaneously
@@ -1991,14 +1944,14 @@ static void mt7615_update_survey(struct mt7615_dev *dev)
*/
mt7615_phy_update_channel(&mdev->phy, 0);
- if (mdev->phy2)
- mt7615_phy_update_channel(mdev->phy2, 1);
+ if (mphy_ext)
+ mt7615_phy_update_channel(mphy_ext, 1);
cur_time = ktime_get_boottime();
mt76_update_survey_active_time(&mdev->phy, cur_time);
- if (mdev->phy2)
- mt76_update_survey_active_time(mdev->phy2, cur_time);
+ if (mphy_ext)
+ mt76_update_survey_active_time(mphy_ext, cur_time);
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
@@ -2022,7 +1975,7 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
struct mt7615_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
bool ext_phy = phy != &dev->phy;
- int i, aggr;
+ int i, aggr = 0;
u32 val, val2;
mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
@@ -2036,7 +1989,6 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
mib->aggr_per = 1000 * (val - val2) / val;
}
- aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < 4; i++) {
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
@@ -2049,8 +2001,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
val);
val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
- dev->mt76.aggr_stats[aggr++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr++] += val >> 16;
+ phy->mt76->aggr_stats[aggr++] += val & 0xffff;
+ phy->mt76->aggr_stats[aggr++] += val >> 16;
}
}
@@ -2068,10 +2020,13 @@ void mt7615_pm_wake_work(struct work_struct *work)
int i;
if (mt76_is_sdio(mdev)) {
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_worker_schedule(&mdev->sdio.txrx_worker);
} else {
+ local_bh_disable();
mt76_for_each_q_rx(mdev, i)
napi_schedule(&mdev->napi[i]);
+ local_bh_enable();
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
false);
@@ -2103,6 +2058,14 @@ void mt7615_pm_power_save_work(struct work_struct *work)
test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
goto out;
+ if (mutex_is_locked(&dev->mt76.mutex))
+ /* if mt76 mutex is held we should not put the device
+ * to sleep since we are currently accessing device
+ * register map. We need to wait for the next power_save
+ * trigger.
+ */
+ goto out;
+
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;
@@ -2160,21 +2123,24 @@ static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
struct mt7615_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
- mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
- mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
}
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
int err;
- err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
- return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
- MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
}
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
@@ -2185,7 +2151,8 @@ static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
int err;
/* start CAC */
- err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
@@ -2249,47 +2216,58 @@ int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
+ enum mt76_dfs_state dfs_state, prev_state;
int err;
if (is_mt7663(&dev->mt76))
return 0;
- if (dev->mt76.region == NL80211_DFS_UNSET) {
- phy->dfs_state = -1;
- if (phy->rdd_state)
- goto stop;
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
+ if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ dfs_state < MT_DFS_STATE_CAC)
+ dfs_state = MT_DFS_STATE_ACTIVE;
+ if (prev_state == dfs_state)
return 0;
- }
- if (test_bit(MT76_SCANNING, &phy->mt76->state))
- return 0;
+ if (dfs_state == MT_DFS_STATE_DISABLED)
+ goto stop;
- if (phy->dfs_state == chandef->chan->dfs_state)
- return 0;
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7615_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
- err = mt7615_dfs_init_radar_specs(phy);
- if (err < 0) {
- phy->dfs_state = -1;
- goto stop;
- }
+ err = mt7615_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
- phy->dfs_state = chandef->chan->dfs_state;
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
+ }
- if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
- if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7615_dfs_start_radar_detector(phy);
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
- return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
+ ext_phy, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
}
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
stop:
- err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7615_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
return 0;
}
@@ -2365,7 +2343,7 @@ void mt7615_coredump_work(struct work_struct *work)
break;
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
- if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
+ if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
dev_kfree_skb(skb);
continue;
}
@@ -2375,6 +2353,8 @@ void mt7615_coredump_work(struct work_struct *work)
dev_kfree_skb(skb);
}
- dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
- GFP_KERNEL);
+
+ if (dump)
+ dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
+ GFP_KERNEL);
}