author     Felix Fietkau <nbd@nbd.name>                2019-03-15 11:03:35 +0100
committer  Johannes Berg <johannes.berg@intel.com>     2019-03-29 11:27:44 +0100
commit     5b989c18dab2e82bac8a5564a174794bf84b20e6 (patch)
tree       dffe8a8325b9bcf306b7b5f77b09dc4a4f6cb606 /net/mac80211
parent     d6db02a88a4aaa1cd7105137c67ddec7f3bdbc05 (diff)
mac80211: rework locking for txq scheduling / airtime fairness
Holding the lock around the entire duration of tx scheduling can create
some nasty lock contention, especially when processing airtime information
from the tx status or the rx path.

Improve locking by only holding the active_txq_lock for lookups /
scheduling list modifications.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
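For context, a driver-side pull loop under the reworked scheduling API might look
roughly like the sketch below. This is only an illustration of the calling convention
implied by the patch: drv_pull_ac() and drv_push_frame() are hypothetical driver-side
names, not mac80211 symbols. The point is that ieee80211_txq_schedule_start(),
ieee80211_next_txq() and ieee80211_schedule_txq() now each take and release
active_txq_lock[ac] internally, so the driver no longer holds the lock across the
whole scheduling round.

    #include <net/mac80211.h>

    /* Hypothetical per-AC pull loop in a driver, sketched against the
     * reworked API.
     */
    static void drv_pull_ac(struct ieee80211_hw *hw, u8 ac)
    {
    	struct ieee80211_txq *txq;
    	struct sk_buff *skb;

    	/* bumps schedule_round[ac] under the lock, then drops it */
    	ieee80211_txq_schedule_start(hw, ac);

    	/* each call briefly takes the lock to pick and unlist a txq;
    	 * returns NULL when the list is empty or its head was already
    	 * scheduled in this round
    	 */
    	while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
    		while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL) {
    			/* drv_push_frame() stands in for the driver's
    			 * hardware queueing path
    			 */
    			drv_push_frame(hw, txq, skb);
    		}

    		/* re-add the txq to the scheduling list if it still has
    		 * backlog; this also locks/unlocks internally now
    		 */
    		ieee80211_schedule_txq(hw, txq);
    	}
    }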
Diffstat (limited to 'net/mac80211')
-rw-r--r--   net/mac80211/tx.c   44
1 file changed, 16 insertions, 28 deletions
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5f546de10d96..134a3da147c6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3649,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_txq *ret = NULL;
struct txq_info *txqi = NULL;
- lockdep_assert_held(&local->active_txq_lock[ac]);
+ spin_lock_bh(&local->active_txq_lock[ac]);
begin:
txqi = list_first_entry_or_null(&local->active_txqs[ac],
struct txq_info,
schedule_order);
if (!txqi)
- return NULL;
+ goto out;
if (txqi->txq.sta) {
struct sta_info *sta = container_of(txqi->txq.sta,
@@ -3675,21 +3676,25 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
if (txqi->schedule_round == local->schedule_round[ac])
- return NULL;
+ goto out;
list_del_init(&txqi->schedule_order);
txqi->schedule_round = local->schedule_round[ac];
- return &txqi->txq;
+ ret = &txqi->txq;
+
+out:
+ spin_unlock_bh(&local->active_txq_lock[ac]);
+ return ret;
}
EXPORT_SYMBOL(ieee80211_next_txq);
-void ieee80211_return_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq)
+void ieee80211_schedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = to_txq_info(txq);
- lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+ spin_lock_bh(&local->active_txq_lock[txq->ac]);
if (list_empty(&txqi->schedule_order) &&
(!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
@@ -3709,17 +3714,7 @@ void ieee80211_return_txq(struct ieee80211_hw *hw,
list_add_tail(&txqi->schedule_order,
&local->active_txqs[txq->ac]);
}
-}
-EXPORT_SYMBOL(ieee80211_return_txq);
-void ieee80211_schedule_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq)
- __acquires(txq_lock) __releases(txq_lock)
-{
- struct ieee80211_local *local = hw_to_local(hw);
-
- spin_lock_bh(&local->active_txq_lock[txq->ac]);
- ieee80211_return_txq(hw, txq);
spin_unlock_bh(&local->active_txq_lock[txq->ac]);
}
EXPORT_SYMBOL(ieee80211_schedule_txq);
@@ -3732,7 +3727,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
struct sta_info *sta;
u8 ac = txq->ac;
- lockdep_assert_held(&local->active_txq_lock[ac]);
+ spin_lock_bh(&local->active_txq_lock[ac]);
if (!txqi->txq.sta)
goto out;
@@ -3762,34 +3757,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
sta->airtime[ac].deficit += sta->airtime_weight;
list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+ spin_unlock_bh(&local->active_txq_lock[ac]);
return false;
out:
if (!list_empty(&txqi->schedule_order))
list_del_init(&txqi->schedule_order);
+ spin_unlock_bh(&local->active_txq_lock[ac]);
return true;
}
EXPORT_SYMBOL(ieee80211_txq_may_transmit);
void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
- __acquires(txq_lock)
{
struct ieee80211_local *local = hw_to_local(hw);
spin_lock_bh(&local->active_txq_lock[ac]);
local->schedule_round[ac]++;
-}
-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
- __releases(txq_lock)
-{
- struct ieee80211_local *local = hw_to_local(hw);
-
spin_unlock_bh(&local->active_txq_lock[ac]);
}
-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,