Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/tx.c')
-rw-r--r--	drivers/net/wireless/mediatek/mt76/tx.c	202
1 file changed, 157 insertions(+), 45 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 1f309d05380a..9ec6d0b53a84 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
  */
@@ -64,7 +64,7 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
 		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
 		struct mt76_wcid *wcid;
 
-		wcid = rcu_dereference(dev->wcid[cb->wcid]);
+		wcid = __mt76_wcid_ptr(dev, cb->wcid);
 		if (wcid) {
 			status.sta = wcid_to_sta(wcid);
 			if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
@@ -77,7 +77,9 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
 		}
 
 		hw = mt76_tx_status_get_hw(dev, skb);
+		spin_lock_bh(&dev->rx_lock);
 		ieee80211_tx_status_ext(hw, &status);
+		spin_unlock_bh(&dev->rx_lock);
 	}
 	rcu_read_unlock();
 }
@@ -98,7 +100,8 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
 		return;
 
 	/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
-	if (flags & MT_TX_CB_TXS_FAILED) {
+	if (flags & MT_TX_CB_TXS_FAILED &&
+	    (dev->drv->drv_flags & MT_DRV_IGNORE_TXS_FAILED)) {
 		info->status.rates[0].count = 0;
 		info->status.rates[0].idx = -1;
 		info->flags |= IEEE80211_TX_STAT_ACK;
@@ -119,6 +122,7 @@ int
 mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
 		       struct sk_buff *skb)
 {
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
 	int pid;
@@ -132,8 +136,14 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
 		return MT_PACKET_ID_NO_ACK;
 
 	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
-			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
+			     IEEE80211_TX_CTL_RATE_CTRL_PROBE))) {
+		if (mtk_wed_device_active(&dev->mmio.wed) &&
+		    ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) ||
+		     ieee80211_is_data(hdr->frame_control)))
+			return MT_PACKET_ID_WED;
+
 		return MT_PACKET_ID_NO_SKB;
+	}
 
 	spin_lock_bh(&dev->status_lock);
 
@@ -241,9 +251,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
 
 	rcu_read_lock();
 
-	if (wcid_idx < ARRAY_SIZE(dev->wcid))
-		wcid = rcu_dereference(dev->wcid[wcid_idx]);
-
+	wcid = __mt76_wcid_ptr(dev, wcid_idx);
 	mt76_tx_check_non_aql(dev, wcid, skb);
 
 #ifdef CONFIG_NL80211_TESTMODE
@@ -261,9 +269,18 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
 #endif
 
 	if (cb->pktid < MT_PACKET_ID_FIRST) {
+		struct ieee80211_rate_status rs = {};
+
 		hw = mt76_tx_status_get_hw(dev, skb);
 		status.sta = wcid_to_sta(wcid);
+		if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
+			rs.rate_idx = wcid->rate;
+			status.rates = &rs;
+			status.n_rates = 1;
+		}
+		spin_lock_bh(&dev->rx_lock);
 		ieee80211_tx_status_ext(hw, &status);
+		spin_unlock_bh(&dev->rx_lock);
 		goto out;
 	}
 
@@ -290,11 +307,14 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
 	int idx;
 
 	non_aql = !info->tx_time_est;
-	idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
+	idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
 	if (idx < 0 || !sta)
 		return idx;
 
 	wcid = (struct mt76_wcid *)sta->drv_priv;
+	if (!wcid->sta)
+		return idx;
+
 	q->entry[idx].wcid = wcid->idx;
 
 	if (!non_aql)
@@ -311,40 +331,41 @@ void
 mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
 	struct mt76_wcid *wcid, struct sk_buff *skb)
 {
-	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct mt76_queue *q;
-	int qid = skb_get_queue_mapping(skb);
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	struct sk_buff_head *head;
 
 	if (mt76_testmode_enabled(phy)) {
 		ieee80211_free_txskb(phy->hw, skb);
 		return;
 	}
 
-	if (WARN_ON(qid >= MT_TXQ_PSD)) {
-		qid = MT_TXQ_BE;
-		skb_set_queue_mapping(skb, qid);
-	}
-
-	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
-	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
-	    !ieee80211_is_data(hdr->frame_control) &&
-	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
-		qid = MT_TXQ_PSD;
-	}
+	if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
+		skb_set_queue_mapping(skb, MT_TXQ_BE);
 
 	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
 		ieee80211_get_tx_rates(info->control.vif, sta, skb,
 				       info->control.rates, 1);
 
 	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
-	q = phy->q_tx[qid];
 
-	spin_lock_bh(&q->lock);
-	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
-	dev->queue_ops->kick(dev, q);
-	spin_unlock_bh(&q->lock);
+	if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
+	    ((info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK) &&
+	     ieee80211_is_probe_req(hdr->frame_control)))
+		head = &wcid->tx_offchannel;
+	else
+		head = &wcid->tx_pending;
+
+	spin_lock_bh(&head->lock);
+	__skb_queue_tail(head, skb);
+	spin_unlock_bh(&head->lock);
+
+	spin_lock_bh(&phy->tx_lock);
+	if (list_empty(&wcid->tx_list))
+		list_add_tail(&wcid->tx_list, &phy->tx_list);
+	spin_unlock_bh(&phy->tx_lock);
+
+	mt76_worker_schedule(&phy->dev->tx_worker);
 }
 EXPORT_SYMBOL_GPL(mt76_tx);
 
@@ -468,8 +489,8 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
 		return idx;
 
 	do {
-		if (test_bit(MT76_RESET, &phy->state))
-			return -EBUSY;
+		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
+			break;
 
 		if (stop || mt76_txq_stopped(q))
 			break;
@@ -502,33 +523,35 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
 static int
 mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
-	struct mt76_queue *q = phy->q_tx[qid];
 	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_txq *txq;
 	struct mt76_txq *mtxq;
 	struct mt76_wcid *wcid;
+	struct mt76_queue *q;
 	int ret = 0;
 
 	while (1) {
 		int n_frames = 0;
 
-		if (test_bit(MT76_RESET, &phy->state))
-			return -EBUSY;
-
-		if (dev->queue_ops->tx_cleanup &&
-		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
-			dev->queue_ops->tx_cleanup(dev, q, false);
-		}
-
 		txq = ieee80211_next_txq(phy->hw, qid);
 		if (!txq)
 			break;
 
 		mtxq = (struct mt76_txq *)txq->drv_priv;
-		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
+		wcid = __mt76_wcid_ptr(dev, mtxq->wcid);
 		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
 			continue;
 
+		phy = mt76_dev_phy(dev, wcid->phy_idx);
+		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
+			continue;
+
+		q = phy->q_tx[qid];
+		if (dev->queue_ops->tx_cleanup &&
+		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
+			dev->queue_ops->tx_cleanup(dev, q, false);
+		}
+
 		if (mtxq->send_bar && mtxq->aggr) {
 			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
 			struct ieee80211_sta *sta = txq->sta;
@@ -575,10 +598,99 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 }
 EXPORT_SYMBOL_GPL(mt76_txq_schedule);
 
+static int
+mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
+			       struct sk_buff_head *head)
+{
+	struct mt76_dev *dev = phy->dev;
+	struct ieee80211_sta *sta;
+	struct mt76_queue *q;
+	struct sk_buff *skb;
+	int ret = 0;
+
+	spin_lock(&head->lock);
+	while ((skb = skb_peek(head)) != NULL) {
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		int qid = skb_get_queue_mapping(skb);
+
+		if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
+		    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+		    !ieee80211_is_data(hdr->frame_control) &&
+		    (!ieee80211_is_bufferable_mmpdu(skb) ||
+		     ieee80211_is_deauth(hdr->frame_control) ||
+		     head == &wcid->tx_offchannel))
+			qid = MT_TXQ_PSD;
+
+		q = phy->q_tx[qid];
+		if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
+			ret = -1;
+			break;
+		}
+
+		__skb_unlink(skb, head);
+		spin_unlock(&head->lock);
+
+		sta = wcid_to_sta(wcid);
+		spin_lock(&q->lock);
+		__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
+		dev->queue_ops->kick(dev, q);
+		spin_unlock(&q->lock);
+
+		spin_lock(&head->lock);
+	}
+	spin_unlock(&head->lock);
+
+	return ret;
+}
+
+static void mt76_txq_schedule_pending(struct mt76_phy *phy)
+{
+	LIST_HEAD(tx_list);
+	int ret = 0;
+
+	if (list_empty(&phy->tx_list))
+		return;
+
+	local_bh_disable();
+	rcu_read_lock();
+
+	spin_lock(&phy->tx_lock);
+	list_splice_init(&phy->tx_list, &tx_list);
+	while (!list_empty(&tx_list)) {
+		struct mt76_wcid *wcid;
+
+		wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
+		list_del_init(&wcid->tx_list);
+
+		spin_unlock(&phy->tx_lock);
+		if (ret >= 0)
+			ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
+		if (ret >= 0 && !phy->offchannel)
+			ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
+		spin_lock(&phy->tx_lock);
+
+		if (!skb_queue_empty(&wcid->tx_pending) &&
+		    !skb_queue_empty(&wcid->tx_offchannel) &&
+		    list_empty(&wcid->tx_list))
+			list_add_tail(&wcid->tx_list, &phy->tx_list);
+	}
+	spin_unlock(&phy->tx_lock);
+
+	rcu_read_unlock();
+	local_bh_enable();
+}
+
 void mt76_txq_schedule_all(struct mt76_phy *phy)
 {
+	struct mt76_phy *main_phy = &phy->dev->phy;
 	int i;
 
+	mt76_txq_schedule_pending(phy);
+
+	if (phy != main_phy && phy->hw == main_phy->hw)
+		return;
+
 	for (i = 0; i <= MT_TXQ_BK; i++)
 		mt76_txq_schedule(phy, i);
 }
@@ -589,6 +701,7 @@ void mt76_tx_worker_run(struct mt76_dev *dev)
 	struct mt76_phy *phy;
 	int i;
 
+	mt76_txq_schedule_all(&dev->phy);
 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
 		phy = dev->phys[i];
 		if (!phy)
 			continue;
@@ -644,9 +757,6 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
 	struct mt76_phy *phy = hw->priv;
 	struct mt76_dev *dev = phy->dev;
 
-	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
-		return;
-
 	mt76_worker_schedule(&dev->tx_worker);
 }
 EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
@@ -737,8 +847,10 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 
 	spin_lock_bh(&dev->token_lock);
 
-	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
-	if (token >= 0)
+	token = idr_alloc(&dev->token, *ptxwi, dev->token_start,
+			  dev->token_start + dev->token_size,
+			  GFP_ATOMIC);
+	if (token >= dev->token_start)
 		dev->token_count++;
 
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
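
The diff reads more easily once the behavioural change in mt76_tx() is clear: instead of grabbing the hardware queue lock in the caller's context, a frame is now appended to a per-station queue (wcid->tx_pending, or wcid->tx_offchannel for off-channel probe requests), the station is linked into phy->tx_list, and the tx worker later drains those queues through mt76_txq_schedule_pending(). The following standalone, single-threaded C sketch only illustrates that producer/worker pattern; every type and helper in it (struct station, struct phy, tx(), worker_run(), and so on) is a simplified stand-in invented for illustration, not an mt76 or mac80211 API, and the kernel's locking is intentionally left out.

/* Simplified sketch of the per-station pending-queue scheme from the diff. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct frame {
	char desc[32];
	struct frame *next;
};

struct frame_queue {
	struct frame *head, *tail;
};

struct station {
	const char *name;
	struct frame_queue pending;     /* normal traffic, like wcid->tx_pending */
	struct frame_queue offchannel;  /* off-channel traffic, like wcid->tx_offchannel */
	struct station *next_scheduled; /* link into the phy-wide schedule list */
	bool scheduled;
};

struct phy {
	struct station *sched_head, *sched_tail; /* like phy->tx_list */
	bool offchannel;                         /* like phy->offchannel */
};

static void enqueue(struct frame_queue *q, const char *desc)
{
	struct frame *f = calloc(1, sizeof(*f));

	snprintf(f->desc, sizeof(f->desc), "%s", desc);
	if (q->tail)
		q->tail->next = f;
	else
		q->head = f;
	q->tail = f;
}

/* mt76_tx() analogue: park the frame, then make sure the station is scheduled. */
static void tx(struct phy *phy, struct station *sta, const char *desc, bool offchan)
{
	enqueue(offchan ? &sta->offchannel : &sta->pending, desc);

	if (!sta->scheduled) {
		sta->scheduled = true;
		if (phy->sched_tail)
			phy->sched_tail->next_scheduled = sta;
		else
			phy->sched_head = sta;
		phy->sched_tail = sta;
	}
}

static void drain(struct station *sta, struct frame_queue *q, const char *label)
{
	while (q->head) {
		struct frame *f = q->head;

		q->head = f->next;
		if (!q->head)
			q->tail = NULL;
		printf("%s: %s frame '%s' -> hardware queue\n", sta->name, label, f->desc);
		free(f);
	}
}

/* Worker analogue of mt76_txq_schedule_pending(): splice the list, drain each station. */
static void worker_run(struct phy *phy)
{
	struct station *sta = phy->sched_head;

	phy->sched_head = phy->sched_tail = NULL;
	while (sta) {
		struct station *next = sta->next_scheduled;

		sta->next_scheduled = NULL;
		sta->scheduled = false;
		drain(sta, &sta->offchannel, "offchannel");
		if (!phy->offchannel)
			drain(sta, &sta->pending, "pending");
		sta = next;
	}
}

int main(void)
{
	struct phy phy = { .offchannel = false };
	struct station sta = { .name = "sta0" };

	tx(&phy, &sta, "data#1", false);
	tx(&phy, &sta, "probe-req", true);
	worker_run(&phy);
	return 0;
}

The design point mirrored here is that the transmit path itself no longer touches the hardware queues: mt76_tx() only appends to a per-station list and kicks the worker, so queueing a frame never contends with the scheduler for q->lock, and off-channel frames can be flushed separately from regular traffic.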
