From 8a7f9fd8a3e09c829c9fc2a86fe2d370ebcafd95 Mon Sep 17 00:00:00 2001
From: Brian Norris
Date: Tue, 25 Jun 2019 10:40:45 -0700
Subject: mwifiex: don't disable hardirqs; just softirqs

main_proc_lock and int_lock (in mwifiex_adapter) are the only spinlocks
used in hardirq contexts. The rest are only in task or softirq contexts.

Convert every other lock from *_irq{save,restore}() variants to _bh()
variants.

This is a mechanical transformation of all spinlock usage in mwifiex
using the following:

Step 1: I ran this nasty sed script:

sed -i -E '/spin_lock_irqsave|spin_unlock_irqrestore/ {
    /main_proc_lock|int_lock/! {
        s:(spin_(un|)lock)_irq(save|restore):\1_bh: ;
        # Join broken lines.
        :a
        /;$/! { N; s/\s*\n\s*//; ba }
        /,.*\);$/ s:,.*\):\):
    }
}' drivers/net/wireless/marvell/mwifiex/*

Step 2: Manually delete the flags / ra_list_flags args from:

  mwifiex_send_single_packet()
  mwifiex_11n_aggregate_pkt()
  mwifiex_send_processed_packet()

which are now unused.

Step 3: Apply this semantic patch (coccinelle) to remove the unused
'flags' variables:

//
@@
type T;
identifier i;
@@
(
extern T i;
|
- T i;
  ... when != i
)
//

(Usage is something like this:

  make coccicheck COCCI=./patch.cocci MODE=patch M=drivers/net/wireless/marvell/mwifiex/

although this skips *.h files for some reasons, so I had to massage
stuff.)

Testing: I've played with a variety of stress tests, including download
stress tests on the same APs which caught regressions with commit
5188d5453bc9 ("mwifiex: restructure rx_reorder_tbl_lock usage"). I've
primarily tested on Marvell 8997 / PCIe, although I've given 8897 /
SDIO a quick spin as well.

Signed-off-by: Brian Norris
Signed-off-by: Kalle Valo
---
 .../net/wireless/marvell/mwifiex/11n_rxreorder.c | 86 +++++++++-------------
 1 file changed, 36 insertions(+), 50 deletions(-)

(limited to 'drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c')

diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 77bdf16d6573..05a3c61ac603 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -113,10 +113,9 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 	struct sk_buff_head list;
 	struct sk_buff *skb;
 	int pkt_to_send, i;
-	unsigned long flags;
 
 	__skb_queue_head_init(&list);
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 
 	pkt_to_send = (start_win > tbl->start_win) ?
 			min((start_win - tbl->start_win), tbl->win_size) :
@@ -140,7 +139,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 	}
 
 	tbl->start_win = start_win;
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	while ((skb = __skb_dequeue(&list)))
 		mwifiex_11n_dispatch_pkt(priv, skb);
@@ -161,10 +160,9 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 	struct sk_buff_head list;
 	struct sk_buff *skb;
 	int i, j, xchg;
-	unsigned long flags;
 
 	__skb_queue_head_init(&list);
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 
 	for (i = 0; i < tbl->win_size; ++i) {
 		if (!tbl->rx_reorder_ptr[i])
@@ -187,7 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 	}
 
 	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	while ((skb = __skb_dequeue(&list)))
 		mwifiex_11n_dispatch_pkt(priv, skb);
@@ -203,19 +201,18 @@ static void
 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 			     struct mwifiex_rx_reorder_tbl *tbl)
 {
-	unsigned long flags;
 	int start_win;
 
 	if (!tbl)
 		return;
 
-	spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
+	spin_lock_bh(&priv->adapter->rx_proc_lock);
 	priv->adapter->rx_locked = true;
 	if (priv->adapter->rx_processing) {
-		spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+		spin_unlock_bh(&priv->adapter->rx_proc_lock);
 		flush_workqueue(priv->adapter->rx_workqueue);
 	} else {
-		spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+		spin_unlock_bh(&priv->adapter->rx_proc_lock);
 	}
 
 	start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
@@ -224,16 +221,16 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 	del_timer_sync(&tbl->timer_context.timer);
 	tbl->timer_context.timer_is_set = false;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	list_del(&tbl->list);
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	kfree(tbl->rx_reorder_ptr);
 	kfree(tbl);
 
-	spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
+	spin_lock_bh(&priv->adapter->rx_proc_lock);
 	priv->adapter->rx_locked = false;
-	spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+	spin_unlock_bh(&priv->adapter->rx_proc_lock);
 
 }
 
@@ -245,17 +242,15 @@ struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
 	struct mwifiex_rx_reorder_tbl *tbl;
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
 		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
+			spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 			return tbl;
 		}
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	return NULL;
 }
@@ -266,21 +261,19 @@ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 {
 	struct mwifiex_rx_reorder_tbl *tbl, *tmp;
-	unsigned long flags;
 
 	if (!ta)
 		return;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
 		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
+			spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 			mwifiex_del_rx_reorder_entry(priv, tbl);
-			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+			spin_lock_bh(&priv->rx_reorder_tbl_lock);
 		}
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	return;
 }
@@ -294,18 +287,16 @@ mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
 {
 	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
 	struct mwifiex_private *priv = ctx->priv;
-	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
 		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
+			spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 			return i;
 		}
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	return -1;
 }
@@ -353,7 +344,6 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
 	int i;
 	struct mwifiex_rx_reorder_tbl *tbl, *new_node;
 	u16 last_seq = 0;
-	unsigned long flags;
 	struct mwifiex_sta_node *node;
 
 	/*
@@ -377,7 +367,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
 	new_node->init_win = seq_num;
 	new_node->flags = 0;
 
-	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+	spin_lock_bh(&priv->sta_list_spinlock);
 	if (mwifiex_queuing_ra_based(priv)) {
 		if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
 			node = mwifiex_get_sta_entry(priv, ta);
@@ -391,7 +381,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
 		else
 			last_seq = priv->rx_seq[tid];
 	}
-	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+	spin_unlock_bh(&priv->sta_list_spinlock);
 
 	mwifiex_dbg(priv->adapter, INFO,
 		    "info: last_seq=%d start_win=%d\n",
@@ -423,9 +413,9 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
 	for (i = 0; i < win_size; ++i)
 		new_node->rx_reorder_ptr[i] = NULL;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 }
 
 static void
@@ -481,18 +471,17 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
 	u32 rx_win_size = priv->add_ba_param.rx_win_size;
 	u8 tid;
 	int win_size;
-	unsigned long flags;
 	uint16_t block_ack_param_set;
 
 	if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
 	    ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
 	    priv->adapter->is_hw_11ac_capable &&
 	    memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
-		spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+		spin_lock_bh(&priv->sta_list_spinlock);
 		sta_ptr = mwifiex_get_sta_entry(priv,
 						cmd_addba_req->peer_mac_addr);
 		if (!sta_ptr) {
-			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+			spin_unlock_bh(&priv->sta_list_spinlock);
 			mwifiex_dbg(priv->adapter, ERROR,
 				    "BA setup with unknown TDLS peer %pM!\n",
 				    cmd_addba_req->peer_mac_addr);
@@ -500,7 +489,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
 		}
 		if (sta_ptr->is_11ac_enabled)
 			rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
-		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+		spin_unlock_bh(&priv->sta_list_spinlock);
 	}
 
 	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
@@ -687,7 +676,6 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
 	struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
 	struct mwifiex_ra_list_tbl *ra_list;
 	u8 cleanup_rx_reorder_tbl;
-	unsigned long flags;
 	int tid_down;
 
 	if (type == TYPE_DELBA_RECEIVE)
@@ -721,9 +709,9 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
 			ra_list->amsdu_in_ampdu = false;
 			ra_list->ba_status = BA_SETUP_NONE;
 		}
-		spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+		spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
 		mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
-		spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+		spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
 	}
 }
 
@@ -809,17 +797,16 @@ void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
 void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
 {
 	struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
 				 &priv->rx_reorder_tbl_ptr, list) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+		spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	}
 	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	mwifiex_reset_11n_rx_seq_num(priv);
 }
@@ -831,7 +818,6 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
 {
 	struct mwifiex_private *priv;
 	struct mwifiex_rx_reorder_tbl *tbl;
-	unsigned long lock_flags;
 	int i;
 
 	for (i = 0; i < adapter->priv_num; i++) {
@@ -839,10 +825,10 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
 		if (!priv)
 			continue;
 
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
+		spin_lock_bh(&priv->rx_reorder_tbl_lock);
 		list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
 			tbl->flags = flags;
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
+		spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 	}
 
 	return;
--
cgit
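For readers unfamiliar with the conversion described in the commit message,
here is a minimal sketch of the before/after locking pattern it applies. It is
not taken from mwifiex: the lock, list, struct, and function names
(example_lock, example_list, example_entry, example_add_*) are hypothetical;
only the spin_lock_irqsave()/spin_unlock_irqrestore() and
spin_lock_bh()/spin_unlock_bh() interfaces are real kernel APIs. The point is
that a lock never acquired in hardirq context only needs to disable softirqs,
so the saved-flags variable can be dropped.

/*
 * Illustrative sketch only (not from the driver): converting a lock that
 * is used solely in task/softirq context from the irqsave variants to the
 * _bh variants.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical; never taken in hardirq */
static LIST_HEAD(example_list);

struct example_entry {
	struct list_head list;
};

/* Before: saves and disables hardirqs around the critical section. */
static void example_add_irqsave(struct example_entry *e)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_add_tail(&e->list, &example_list);
	spin_unlock_irqrestore(&example_lock, flags);
}

/* After: only softirqs (bottom halves) are disabled; 'flags' is unnecessary. */
static void example_add_bh(struct example_entry *e)
{
	spin_lock_bh(&example_lock);
	list_add_tail(&e->list, &example_list);
	spin_unlock_bh(&example_lock);
}

This mirrors why the patch can delete every "unsigned long flags" declaration
it touches: once no spin_lock_irqsave() caller remains, nothing needs to save
the interrupt state, which is exactly what the coccinelle step cleans up.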