author     Johannes Berg <johannes.berg@intel.com>    2023-12-07 04:50:15 +0200
committer  Johannes Berg <johannes.berg@intel.com>    2023-12-12 10:37:00 +0100
commit     14c1b6f430e3582b3ef2ce1e3016de829b520f77
tree       dfa6cb00084f7bc807f650ae00824a3de9e347e5 /drivers/net/wireless/intel/iwlwifi/pcie/tx.c
parent     ed44bab6ba2112824b27bdfe185c8f18d6017e56
wifi: iwlwifi: remove async command callback
There's only one user of this code, which is STA unblock during sleep for uAPSD on really old devices. Instead of routing this all through the API with calls up and down, just implement a special-case CMD_BLOCK_TXQS flag for it; it's only needed in the old gen1 transport.

While at it, fix a complaint that lockdep would otherwise raise, since we lock the command queue and then the TXQs in the reclaim path, by using spin_lock_nested(). We no longer need to disable BHs in iwl_trans_pcie_block_txq_ptrs() since it's called with them already disabled.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Link: https://msgid.link/20231207044813.2bd95e0570fc.I16486dbc82570d2f73a585872f5394698627310d@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
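For reference, a caller requests the new behaviour simply by setting the flag on the host command. A minimal sketch (not part of this patch; the command ID and payload are illustrative, while struct iwl_host_cmd and iwl_trans_send_cmd() are the existing transport API):

	/* Hedged sketch: how an op-mode caller could ask the transport to
	 * block data TXQ write pointers until this command completes.
	 * ADD_STA and the payload struct are placeholders for illustration.
	 */
	static int example_send_blocking_cmd(struct iwl_trans *trans,
					     struct iwl_mvm_add_sta_cmd *sta_cmd)
	{
		struct iwl_host_cmd hcmd = {
			.id    = ADD_STA,		/* placeholder command ID */
			.flags = CMD_BLOCK_TXQS,	/* freeze data TXQ write
							 * pointers until completion */
			.data  = { sta_cmd, },
			.len   = { sizeof(*sta_cmd), },
		};

		return iwl_trans_send_cmd(trans, &hcmd);
	}

The transport blocks the queues when it enqueues such a command and unblocks them in iwl_pcie_hcmd_complete(), as shown in the diff below.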
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie/tx.c')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c  |  34
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 2f39b639c43f..6c2b37e56c78 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -873,6 +873,33 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
+{
+ int i;
+
+ for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+ struct iwl_txq *txq = trans->txqs.txq[i];
+
+ if (i == trans->txqs.cmd.q_id)
+ continue;
+
+ /* we skip the command queue (obviously) so it's OK to nest */
+ spin_lock_nested(&txq->lock, 1);
+
+ if (!block && !(WARN_ON_ONCE(!txq->block))) {
+ txq->block--;
+ if (!txq->block) {
+ iwl_write32(trans, HBUS_TARG_WRPTR,
+ txq->write_ptr | (i << 8));
+ }
+ } else if (block) {
+ txq->block++;
+ }
+
+ spin_unlock(&txq->lock);
+ }
+}
+
/*
* iwl_pcie_enqueue_hcmd - enqueue a uCode command
* @priv: device private data point
@@ -1137,6 +1164,9 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto out;
}
+ if (cmd->flags & CMD_BLOCK_TXQS)
+ iwl_trans_pcie_block_txq_ptrs(trans, true);
+
/* Increment and update queue's write index */
txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
@@ -1202,8 +1232,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
meta->source->_rx_page_order = trans_pcie->rx_page_order;
}
- if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
- iwl_op_mode_async_cb(trans->op_mode, cmd);
+ if (meta->flags & CMD_BLOCK_TXQS)
+ iwl_trans_pcie_block_txq_ptrs(trans, false);
iwl_pcie_cmdq_reclaim(trans, txq_id, index);
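
For reference, a minimal sketch of the locking pattern the commit message describes: the command queue lock and the data queue locks share a lock class, and the unblock runs while the command queue lock is already held, so the inner lock must be taken with an explicit subclass to avoid a false lockdep recursion report. Names below are assumed for illustration and are not part of the patch:

	#include <linux/spinlock.h>

	/* Hedged illustration: outer lock is the command queue's, inner lock
	 * is a data queue's; both belong to the same lock class, hence the
	 * explicit nesting subclass (1) on the inner acquisition.
	 */
	static void example_nested_lock(spinlock_t *cmd_lock, spinlock_t *data_lock)
	{
		spin_lock_bh(cmd_lock);			/* command queue lock (outer) */
		spin_lock_nested(data_lock, 1);		/* data queue lock, subclass 1 */
		/* ... update the data queue's write pointer here ... */
		spin_unlock(data_lock);
		spin_unlock_bh(cmd_lock);
	}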