path: root/drivers/net/wireless/mediatek/mt76/tx.c
Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/tx.c')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/tx.c  84
1 file changed, 57 insertions, 27 deletions
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index f0f7a913eaab..11719ef034d8 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -38,21 +38,21 @@ EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
- __acquires(&dev->status_list.lock)
+ __acquires(&dev->status_lock)
{
__skb_queue_head_init(list);
- spin_lock_bh(&dev->status_list.lock);
+ spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
- __releases(&dev->status_list.lock)
+ __releases(&dev->status_lock)
{
struct ieee80211_hw *hw;
struct sk_buff *skb;
- spin_unlock_bh(&dev->status_list.lock);
+ spin_unlock_bh(&dev->status_lock);
rcu_read_lock();
while ((skb = __skb_dequeue(list)) != NULL) {
@@ -64,9 +64,13 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
struct mt76_wcid *wcid;
wcid = rcu_dereference(dev->wcid[cb->wcid]);
- if (wcid)
+ if (wcid) {
status.sta = wcid_to_sta(wcid);
+ if (status.sta)
+ status.rate = &wcid->rate;
+ }
+
hw = mt76_tx_status_get_hw(dev, skb);
ieee80211_tx_status_ext(hw, &status);
}
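The two hunks above rely on members that live in mt76.h rather than in this file: a dedicated dev->status_lock replaces the lock embedded in the old dev->status_list skb queue, each wcid carries a pktid IDR plus a list node, dev->wcid_list links the wcids that currently have tracked frames, and wcid->rate holds the last known tx rate now reported through status.rate. A rough orientation sketch of those members, inferred from how this file uses them rather than copied from the header:

#include <linux/idr.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <net/cfg80211.h>        /* struct rate_info */

/* Orientation only: the real definitions live in mt76.h and contain many
 * more members; the names below are the ones referenced by this diff.
 */
struct mt76_wcid {
        /* ... */
        struct rate_info rate;          /* last known tx rate, reported via status.rate */
        struct list_head list;          /* node on dev->wcid_list while packets are pending */
        struct idr pktid;               /* packet id -> status skb */
        /* ... */
};

struct mt76_dev {
        /* ... */
        spinlock_t status_lock;         /* protects every wcid->pktid IDR and dev->wcid_list */
        struct list_head wcid_list;     /* wcids with outstanding status skbs */
        /* ... */
};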
@@ -88,8 +92,6 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
if ((flags & done) != done)
return;
- __skb_unlink(skb, &dev->status_list);
-
/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
if (flags & MT_TX_CB_TXS_FAILED) {
info->status.rates[0].count = 0;
@@ -116,6 +118,8 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
int pid;
+ memset(cb, 0, sizeof(*cb));
+
if (!wcid)
return MT_PACKET_ID_NO_ACK;
@@ -126,16 +130,23 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
return MT_PACKET_ID_NO_SKB;
- spin_lock_bh(&dev->status_list.lock);
+ spin_lock_bh(&dev->status_lock);
+
+ pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
+ MT_PACKET_ID_MASK, GFP_ATOMIC);
+ if (pid < 0) {
+ pid = MT_PACKET_ID_NO_SKB;
+ goto out;
+ }
- memset(cb, 0, sizeof(*cb));
- pid = mt76_get_next_pkt_id(wcid);
cb->wcid = wcid->idx;
cb->pktid = pid;
- cb->jiffies = jiffies;
- __skb_queue_tail(&dev->status_list, skb);
- spin_unlock_bh(&dev->status_list.lock);
+ if (list_empty(&wcid->list))
+ list_add_tail(&wcid->list, &dev->wcid_list);
+
+out:
+ spin_unlock_bh(&dev->status_lock);
return pid;
}
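The packet id handed back to the caller is now simply an IDR handle: idr_alloc() stores the skb pointer and returns a free integer in the [MT_PACKET_ID_FIRST, MT_PACKET_ID_MASK) range, and the status path later claims it back with idr_remove(). A minimal stand-alone sketch of that round trip, using illustrative example_ names and literal bounds in place of the driver's constants:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/skbuff.h>

/* Store an skb under a small integer id; idr_alloc() returns an unused id
 * in [start, end) or a negative errno. GFP_ATOMIC matches the driver,
 * which calls this with a spinlock held and bottom halves disabled.
 */
static int example_store(struct idr *ids, struct sk_buff *skb)
{
        return idr_alloc(ids, skb, 2, 128, GFP_ATOMIC);
}

/* Claim the skb back: idr_remove() returns the stored pointer (or NULL)
 * and releases the id for reuse, so a given id can only match once.
 */
static struct sk_buff *example_claim(struct idr *ids, int id)
{
        return idr_remove(ids, id);
}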
@@ -145,36 +156,53 @@ struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
struct sk_buff_head *list)
{
- struct sk_buff *skb, *tmp;
+ struct sk_buff *skb;
+ int id;
- skb_queue_walk_safe(&dev->status_list, skb, tmp) {
- struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+ lockdep_assert_held(&dev->status_lock);
- if (wcid && cb->wcid != wcid->idx)
- continue;
+ skb = idr_remove(&wcid->pktid, pktid);
+ if (skb)
+ goto out;
- if (cb->pktid == pktid)
- return skb;
+ /* look for stale entries in the wcid idr queue */
+ idr_for_each_entry(&wcid->pktid, skb, id) {
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
- if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
- MT_TX_STATUS_SKB_TIMEOUT))
- continue;
+ if (pktid >= 0) {
+ if (!(cb->flags & MT_TX_CB_DMA_DONE))
+ continue;
+
+ if (!time_is_after_jiffies(cb->jiffies +
+ MT_TX_STATUS_SKB_TIMEOUT))
+ continue;
+ }
+ /* It has been too long since DMA_DONE, time out this packet
+ * and stop waiting for TXS callback.
+ */
+ idr_remove(&wcid->pktid, cb->pktid);
__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
MT_TX_CB_TXS_DONE, list);
}
- return NULL;
+out:
+ if (idr_is_empty(&wcid->pktid))
+ list_del_init(&wcid->list);
+
+ return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
void
-mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
+mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
+ struct mt76_wcid *wcid, *tmp;
struct sk_buff_head list;
mt76_tx_status_lock(dev, &list);
- mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
+ list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
+ mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
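The calling convention for status events is unchanged by the IDR conversion: a handler takes dev->status_lock, claims the skb registered under the reported packet id, and delivers everything on the local list once the lock is dropped. A hedged sketch of such a handler; the example_ name is illustrative, and mt76_tx_status_skb_done() refers to the existing mt76.h wrapper that marks the TXS half of the completion:

#include "mt76.h"

static void example_handle_txs_event(struct mt76_dev *dev,
                                     struct mt76_wcid *wcid, int pktid)
{
        struct sk_buff_head list;
        struct sk_buff *skb;

        mt76_tx_status_lock(dev, &list);

        /* Claim the skb stored under this packet id; the same call also
         * reaps stale entries for this wcid onto &list.
         */
        skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
        if (skb) {
                /* ... fill in rate/ACK details from the hardware event ... */
                mt76_tx_status_skb_done(dev, skb, &list);
        }

        mt76_tx_status_unlock(dev, &list);
}

mt76_tx_status_check() itself now only walks the wcids actually present on dev->wcid_list, so the periodic timeout sweep no longer scans one global queue holding every tracked frame; passing flush == true (pktid -1) skips the age checks and drops all pending entries.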
@@ -197,6 +225,7 @@ mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
struct list_head *free_list)
{
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
struct ieee80211_tx_status status = {
.skb = skb,
.free_list = free_list,
@@ -226,7 +255,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
}
#endif
- if (!skb->prev) {
+ if (cb->pktid < MT_PACKET_ID_FIRST) {
hw = mt76_tx_status_get_hw(dev, skb);
status.sta = wcid_to_sta(wcid);
ieee80211_tx_status_ext(hw, &status);
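The old code detected untracked frames by checking skb->prev, i.e. whether the skb had been linked into the global status_list. With that list gone, the stored packet id carries the same information: values below MT_PACKET_ID_FIRST are the special non-tracked ids (MT_PACKET_ID_NO_ACK and MT_PACKET_ID_NO_SKB, seen earlier in this file), so such frames are completed immediately, while IDR-allocated ids fall through to the status machinery below.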
@@ -234,6 +263,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
}
mt76_tx_status_lock(dev, &list);
+ cb->jiffies = jiffies;
__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
mt76_tx_status_unlock(dev, &list);
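cb->jiffies is now stamped here, at DMA completion, instead of in mt76_tx_status_skb_add(), so the stale-entry scan above measures MT_TX_STATUS_SKB_TIMEOUT from the moment the hardware reported DMA done, matching the "too long since DMA_DONE" comment, rather than from when the frame was first queued.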