author     Felix Fietkau <nbd@nbd.name>    2023-08-29 10:39:53 +0200
committer  Felix Fietkau <nbd@nbd.name>    2023-09-30 20:03:03 +0200
commit     0335c034e7265d36d956e806f33202c94a8a9860 (patch)
tree       135dabd2b968ab97d046a84aa39df63151fa20b1 /drivers/net/wireless/mediatek/mt76/tx.c
parent     debd133ab2e2e06989c24e1a72be2b7997f30456 (diff)
wifi: mt76: fix race condition related to checking tx queue fill status
When drv_tx calls race against local tx scheduling, the queue fill status
checks can potentially race, leading to dma queue entries being overwritten.
Fix this by deferring packets from drv_tx calls to the tx worker, in order
to ensure that all regular queue tx comes from the same context.

Reported-by: Ryder Lee <Ryder.Lee@mediatek.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
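The fix is easiest to see as a producer/consumer handoff: concurrent senders
stop touching the hardware queue themselves and instead defer everything to a
single worker, so the fill-status check and the queue write always happen in
one context. The following is a minimal userspace sketch of that pattern in C
with pthreads, illustrative only and not the mt76 code itself: station,
tx_enqueue() and tx_worker() are hypothetical stand-ins for wcid->tx_pending,
mt76_tx() and dev->tx_worker.

/*
 * Minimal userspace model of the deferred-tx pattern (hypothetical names,
 * not the mt76 API). Producers model mt76_tx() callers; the worker models
 * dev->tx_worker; sta.pending models the wcid->tx_pending skb queue.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static struct {
        pthread_mutex_t lock;   /* like wcid->tx_pending.lock */
        int pending;            /* frames deferred by producers */
} sta = { PTHREAD_MUTEX_INITIALIZER, 0 };

static pthread_mutex_t worker_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t worker_cv = PTHREAD_COND_INITIALIZER;
static bool work_scheduled;

/* Producer path: never touches the hardware queue; it only appends to
 * the per-station list and kicks the worker (like mt76_worker_schedule). */
static void tx_enqueue(void)
{
        pthread_mutex_lock(&sta.lock);
        sta.pending++;
        pthread_mutex_unlock(&sta.lock);

        pthread_mutex_lock(&worker_lock);
        work_scheduled = true;
        pthread_cond_signal(&worker_cv);
        pthread_mutex_unlock(&worker_lock);
}

/* Worker: the single context that checks queue fill status and writes
 * entries, so the check can no longer race with another writer. */
static void *tx_worker(void *arg)
{
        (void)arg;
        for (;;) {
                pthread_mutex_lock(&worker_lock);
                while (!work_scheduled)
                        pthread_cond_wait(&worker_cv, &worker_lock);
                work_scheduled = false;
                pthread_mutex_unlock(&worker_lock);

                pthread_mutex_lock(&sta.lock);
                while (sta.pending > 0) {
                        sta.pending--;
                        pthread_mutex_unlock(&sta.lock);
                        /* fill check + hardware queue write would go here */
                        printf("worker: queued one frame to hw\n");
                        pthread_mutex_lock(&sta.lock);
                }
                pthread_mutex_unlock(&sta.lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t thr;

        pthread_create(&thr, NULL, tx_worker, NULL);
        for (int i = 0; i < 8; i++)
                tx_enqueue();   /* racing producers are now harmless */
        sleep(1);               /* let the worker drain before exiting */
        return 0;
}

As in the patch below, the worker drops the per-station lock while it writes
a frame out, so producers queueing new frames are never blocked behind the
hardware path.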
Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/tx.c')
-rw-r--r--   drivers/net/wireless/mediatek/mt76/tx.c   108
1 file changed, 88 insertions(+), 20 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 6cc26cc6c517..1809b03292c3 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -329,40 +329,32 @@ void
 mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
         struct mt76_wcid *wcid, struct sk_buff *skb)
 {
-        struct mt76_dev *dev = phy->dev;
         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-        struct mt76_queue *q;
-        int qid = skb_get_queue_mapping(skb);
 
         if (mt76_testmode_enabled(phy)) {
                 ieee80211_free_txskb(phy->hw, skb);
                 return;
         }
 
-        if (WARN_ON(qid >= MT_TXQ_PSD)) {
-                qid = MT_TXQ_BE;
-                skb_set_queue_mapping(skb, qid);
-        }
-
-        if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
-            !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
-            !ieee80211_is_data(hdr->frame_control) &&
-            !ieee80211_is_bufferable_mmpdu(skb)) {
-                qid = MT_TXQ_PSD;
-        }
+        if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
+                skb_set_queue_mapping(skb, MT_TXQ_BE);
 
         if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
                 ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                        info->control.rates, 1);
 
         info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
-        q = phy->q_tx[qid];
 
-        spin_lock_bh(&q->lock);
-        __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
-        dev->queue_ops->kick(dev, q);
-        spin_unlock_bh(&q->lock);
+        spin_lock_bh(&wcid->tx_pending.lock);
+        __skb_queue_tail(&wcid->tx_pending, skb);
+        spin_unlock_bh(&wcid->tx_pending.lock);
+
+        spin_lock_bh(&phy->tx_lock);
+        if (list_empty(&wcid->tx_list))
+                list_add_tail(&wcid->tx_list, &phy->tx_list);
+        spin_unlock_bh(&phy->tx_lock);
+
+        mt76_worker_schedule(&phy->dev->tx_worker);
 }
 EXPORT_SYMBOL_GPL(mt76_tx);
@@ -593,10 +585,86 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 }
 EXPORT_SYMBOL_GPL(mt76_txq_schedule);
 
+static int
+mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
+{
+        struct mt76_dev *dev = phy->dev;
+        struct ieee80211_sta *sta;
+        struct mt76_queue *q;
+        struct sk_buff *skb;
+        int ret = 0;
+
+        spin_lock(&wcid->tx_pending.lock);
+        while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
+                struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+                int qid = skb_get_queue_mapping(skb);
+
+                if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
+                    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+                    !ieee80211_is_data(hdr->frame_control) &&
+                    !ieee80211_is_bufferable_mmpdu(skb))
+                        qid = MT_TXQ_PSD;
+
+                q = phy->q_tx[qid];
+                if (mt76_txq_stopped(q)) {
+                        ret = -1;
+                        break;
+                }
+
+                __skb_unlink(skb, &wcid->tx_pending);
+                spin_unlock(&wcid->tx_pending.lock);
+
+                sta = wcid_to_sta(wcid);
+                spin_lock(&q->lock);
+                __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
+                dev->queue_ops->kick(dev, q);
+                spin_unlock(&q->lock);
+
+                spin_lock(&wcid->tx_pending.lock);
+        }
+        spin_unlock(&wcid->tx_pending.lock);
+
+        return ret;
+}
+
+static void mt76_txq_schedule_pending(struct mt76_phy *phy)
+{
+        if (list_empty(&phy->tx_list))
+                return;
+
+        local_bh_disable();
+        rcu_read_lock();
+
+        spin_lock(&phy->tx_lock);
+        while (!list_empty(&phy->tx_list)) {
+                struct mt76_wcid *wcid = NULL;
+                int ret;
+
+                wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
+                list_del_init(&wcid->tx_list);
+
+                spin_unlock(&phy->tx_lock);
+                ret = mt76_txq_schedule_pending_wcid(phy, wcid);
+                spin_lock(&phy->tx_lock);
+
+                if (ret) {
+                        if (list_empty(&wcid->tx_list))
+                                list_add_tail(&wcid->tx_list, &phy->tx_list);
+                        break;
+                }
+        }
+        spin_unlock(&phy->tx_lock);
+
+        rcu_read_unlock();
+        local_bh_enable();
+}
+
 void mt76_txq_schedule_all(struct mt76_phy *phy)
 {
         int i;
 
+        mt76_txq_schedule_pending(phy);
         for (i = 0; i <= MT_TXQ_BK; i++)
                 mt76_txq_schedule(phy, i);
 }