author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-20 20:01:26 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-20 20:01:26 -0700
commit | 087afe8aaf562dc7a53f2577049830d6a3245742 (patch)
tree | 94fe422e62965b24030019368cb9ec4f9c90cd38 /drivers/net/wireless/intel/iwlwifi/mvm
parent | 54cf809b9512be95f53ed4a5e3b631d1ac42f0fa (diff)
parent | 95829b3a9c0b1d88778b23bc2afdf5a83de066ff (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes and more updates from David Miller:
1) Tunneling fixes from Tom Herbert and Alexander Duyck.
2) AF_UNIX updates some struct sock bit fields without the socket
lock, whereas setsockopt() sets overlapping ones with locking.
Separate out the synchronized vs. the AF_UNIX unsynchronized ones
to avoid corruption; a minimal sketch of the underlying bitfield
hazard follows this list. From Andrey Ryabinin.
3) Mount BPF filesystem with mount_nodev rather than mount_ns, from
Eric Biederman.
4) A couple kmemdup conversions, from Muhammad Falak R Wani.
5) BPF verifier fixes from Alexei Starovoitov.
6) Don't let tunneled UDP packets get stuck in socket queues; if
something goes wrong during the encapsulation, just drop the packet
rather than signalling an error up the call stack. From Hannes
Frederic Sowa.
7) SKB ref after free in batman-adv, from Florian Westphal.
8) TCP iSCSI, ocfs2, rds, and tipc have to disable BH in their TCP
callbacks since the TCP stack runs pre-emptibly now; see the
callback sketch after this list. From Eric Dumazet.
9) Fix crash in fixed_phy_add, from Rabin Vincent.
10) Fix length checks in xen-netback, from Paul Durrant.
11) Fix mixup in KEY vs KEYID macsec attributes, from Sabrina Dubroca.
12) RDS connection spamming bug fixes from Sowmini Varadhan.
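The corruption in item 2 comes from how C bitfields are stored: fields that share a storage word are updated with a read-modify-write of the whole word, so a lockless writer to one field can silently undo a concurrent locked update to its neighbor. Below is a minimal userspace sketch of that race pattern; the struct and field names are illustrative, not the real struct sock layout. The actual fix ("net: sock: move ->sk_shutdown out of bitfields." in the shortlog) gives the unsynchronized field its own storage so the two writers no longer touch the same word.

```c
#include <pthread.h>
#include <stdio.h>

/*
 * Illustrative only: two flags packed into one storage word, as with
 * the bitfields in struct sock. Writing either field compiles to a
 * read-modify-write of the shared word.
 */
struct packed_flags {
	unsigned locked_bit   : 1;	/* updated under "the socket lock" */
	unsigned lockless_bit : 2;	/* updated with no locking at all */
};

static struct packed_flags flags;
static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

/* models setsockopt(): honest locking around its own field */
static void *locked_writer(void *unused)
{
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&sock_lock);
		flags.locked_bit = 1;
		pthread_mutex_unlock(&sock_lock);
	}
	return NULL;
}

/* models the AF_UNIX path: same word, no lock - its RMW can write back
 * a stale locked_bit, which is the corruption described in item 2 */
static void *lockless_writer(void *unused)
{
	for (int i = 0; i < 100000; i++)
		flags.lockless_bit = 3;
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, locked_writer, NULL);
	pthread_create(&b, NULL, lockless_writer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* locked_bit may intermittently read back 0 here */
	printf("locked_bit=%u lockless_bit=%u\n",
	       flags.locked_bit, flags.lockless_bit);
	return 0;
}
```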
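Item 8 means socket callbacks such as ->sk_data_ready() can no longer assume they are entered with BH already disabled, so any lock they share with softirq context must be taken with the _bh variants. A kernel-style sketch of the pattern, assuming a hypothetical in-kernel TCP user (my_data_ready() and my_rx_lock are made-up names, not from the patch series):

```c
#include <linux/spinlock.h>
#include <net/sock.h>

static DEFINE_SPINLOCK(my_rx_lock);	/* hypothetical lock, also taken in softirq */

/* hypothetical ->sk_data_ready() for an in-kernel TCP user */
static void my_data_ready(struct sock *sk)
{
	/*
	 * The TCP stack may now call this preemptibly, so disable BH
	 * ourselves instead of assuming softirq context.
	 */
	read_lock_bh(&sk->sk_callback_lock);
	spin_lock(&my_rx_lock);		/* BH already off via read_lock_bh() */
	/* ... harvest skbs from sk's receive queue ... */
	spin_unlock(&my_rx_lock);
	read_unlock_bh(&sk->sk_callback_lock);
}
```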
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (152 commits)
net: suppress warnings on dev_alloc_skb
uapi glibc compat: fix compilation when !__USE_MISC in glibc
udp: prevent skbs lingering in tunnel socket queues
bpf: teach verifier to recognize imm += ptr pattern
bpf: support decreasing order in direct packet access
net: usb: ch9200: use kmemdup
ps3_gelic: use kmemdup
net:liquidio: use kmemdup
bpf: Use mount_nodev not mount_ns to mount the bpf filesystem
net: cdc_ncm: update datagram size after changing mtu
tuntap: correctly wake up process during uninit
intel: Add support for IPv6 IP-in-IP offload
ip6_gre: Do not allow segmentation offloads when GRE_CSUM is enabled with FOU/GUE
RDS: TCP: Avoid rds connection churn from rogue SYNs
RDS: TCP: rds_tcp_accept_worker() must exit gracefully when terminating rds-tcp
net: sock: move ->sk_shutdown out of bitfields.
ipv6: Don't reset inner headers in ip6_tnl_xmit
ip4ip6: Support for GSO/GRO
ip6ip6: Support for GSO/GRO
ipv6: Set features for IPv6 tunnels
...
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm')
21 files changed, 1111 insertions, 137 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index b96b1c6a97fa..4eeb6b78d952 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -109,6 +109,7 @@ #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 +#define IWL_MVM_HW_CSUM_DISABLE 0 #define IWL_MVM_COLLECT_FW_ERR_DUMP 1 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index e3561bbc2468..4fdc3dad3e85 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1804,7 +1804,6 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, struct iwl_wowlan_status *fw_status; int i; bool keep; - struct ieee80211_sta *ap_sta; struct iwl_mvm_sta *mvm_ap_sta; fw_status = iwl_mvm_get_wakeup_status(mvm, vif); @@ -1823,13 +1822,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, status.wake_packet = fw_status->wake_packet; /* still at hard-coded place 0 for D3 image */ - ap_sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[0], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(ap_sta)) + mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); + if (!mvm_ap_sta) goto out_free; - mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); for (i = 0; i < IWL_MAX_TID_COUNT; i++) { u16 seq = status.qos_seq_ctr[i]; /* firmware stores last-used value, we store next value */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index fb96bc00f022..b23271755daf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -281,13 +281,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file, if (vif->type == NL80211_IFTYPE_STATION && ap_sta_id != IWL_MVM_STATION_COUNT) { - struct ieee80211_sta *sta; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id], - lockdep_is_held(&mvm->mutex)); - if (!IS_ERR_OR_NULL(sta)) { - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvm_sta; + mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); + if (mvm_sta) { pos += scnprintf(buf+pos, bufsz-pos, "ap_sta_id %d - reduced Tx power %d\n", ap_sta_id, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 362a54601a80..406cf1cb945c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1309,6 +1309,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file, PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA); PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT); PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE); + PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD); + PRINT_MVM_REF(IWL_MVM_REF_RX); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h index eec52c57f718..5f22cc7ac26a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h @@ -368,7 +368,7 @@ struct iwl_wowlan_gtk_status { u8 decrypt_key[16]; u8 tkip_mic_key[8]; struct iwl_wowlan_rsc_tsc_params_cmd rsc; -} __packed; +} __packed; /* 
WOWLAN_GTK_MATERIAL_VER_1 */ struct iwl_wowlan_status { struct iwl_wowlan_gtk_status gtk; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index 4c086d048097..1ca8e4988b88 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -437,21 +437,28 @@ struct iwl_rxq_sync_notification { /** * Internal message identifier * +* @IWL_MVM_RXQ_EMPTY: empty sync notification * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA */ enum iwl_mvm_rxq_notif_type { + IWL_MVM_RXQ_EMPTY, IWL_MVM_RXQ_NOTIF_DEL_BA, }; /** * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent * in &iwl_rxq_sync_cmd. Should be DWORD aligned. +* FW is agnostic to the payload, so there are no endianity requirements. * * @type: value from &iwl_mvm_rxq_notif_type +* @sync: ctrl path is waiting for all notifications to be received +* @cookie: internal cookie to identify old notifications * @data: payload */ struct iwl_mvm_internal_rxq_notif { - u32 type; + u16 type; + u16 sync; + u32 cookie; u8 data[]; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index 90d911394836..38b1d045be8e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -173,7 +173,7 @@ enum iwl_sta_key_flag { /** * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed - * @STA_MODIFY_KEY: this command modifies %key + * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx * @STA_MODIFY_TX_RATE: unused * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid @@ -183,7 +183,7 @@ enum iwl_sta_key_flag { * @STA_MODIFY_QUEUES: modify the queues used by this station */ enum iwl_sta_modify_flag { - STA_MODIFY_KEY = BIT(0), + STA_MODIFY_QUEUE_REMOVAL = BIT(0), STA_MODIFY_TID_DISABLE_TX = BIT(1), STA_MODIFY_TX_RATE = BIT(2), STA_MODIFY_ADD_BA_TID = BIT(3), @@ -255,8 +255,10 @@ struct iwl_mvm_keyinfo { __le64 hw_tkip_mic_tx_key; } __packed; -#define IWL_ADD_STA_STATUS_MASK 0xFF -#define IWL_ADD_STA_BAID_MASK 0xFF00 +#define IWL_ADD_STA_STATUS_MASK 0xFF +#define IWL_ADD_STA_BAID_VALID_MASK 0x8000 +#define IWL_ADD_STA_BAID_MASK 0x7F00 +#define IWL_ADD_STA_BAID_SHIFT 8 /** * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table. diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 60eed8485aba..41b80ae2d5f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -90,6 +90,7 @@ enum { * DQA queue numbers * * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW + * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure * that we are never left without the possibility to connect to an AP. @@ -97,6 +98,8 @@ enum { * Each MGMT queue is mapped to a single STA * MGMT frames are frames that return true on ieee80211_is_mgmt() * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames + * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe + * responses * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames. 
* DATA frames are intended for !ieee80211_is_mgmt() frames, but if * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues @@ -105,10 +108,12 @@ enum { */ enum iwl_mvm_dqa_txq { IWL_MVM_DQA_CMD_QUEUE = 0, + IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, IWL_MVM_DQA_GCAST_QUEUE = 3, IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, + IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, IWL_MVM_DQA_MIN_DATA_QUEUE = 10, IWL_MVM_DQA_MAX_DATA_QUEUE = 31, }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index e25171f9b407..1ece19dcd9ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -271,9 +271,6 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); i++) { - /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i); - fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i]; @@ -289,6 +286,10 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(i); + + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i); + fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(mvm->trans, TXF_CPU2_FIFO_ITEM_CNT)); @@ -339,9 +340,11 @@ void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm) #define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ #define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */ -static const struct { +struct iwl_prph_range { u32 start, end; -} iwl_prph_dump_addr[] = { +}; + +static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = { { .start = 0x00a00000, .end = 0x00a00000 }, { .start = 0x00a0000c, .end = 0x00a00024 }, { .start = 0x00a0002c, .end = 0x00a0003c }, @@ -439,8 +442,18 @@ static const struct { { .start = 0x00a44000, .end = 0x00a7bf80 }, }; +static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { + { .start = 0x00a05c00, .end = 0x00a05c18 }, + { .start = 0x00a05400, .end = 0x00a056e8 }, + { .start = 0x00a08000, .end = 0x00a098bc }, + { .start = 0x00adfc00, .end = 0x00adfd1c }, + { .start = 0x00a02400, .end = 0x00a02758 }, +}; + static u32 iwl_dump_prph(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data) + struct iwl_fw_error_dump_data **data, + const struct iwl_prph_range *iwl_prph_dump_addr, + u32 range_len) { struct iwl_fw_error_dump_prph *prph; unsigned long flags; @@ -449,7 +462,7 @@ static u32 iwl_dump_prph(struct iwl_trans *trans, if (!iwl_trans_grab_nic_access(trans, &flags)) return 0; - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; @@ -572,16 +585,31 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } /* Make room for PRPH registers */ - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) { /* The range includes both boundaries */ - int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4; + int num_bytes_in_chunk = + iwl_prph_dump_addr_comm[i].end - + iwl_prph_dump_addr_comm[i].start + 4; prph_len += sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_prph) + num_bytes_in_chunk; } + if 
(mvm->cfg->mq_rx_supported) { + for (i = 0; i < + ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = + iwl_prph_dump_addr_9000[i].end - + iwl_prph_dump_addr_9000[i].start + 4; + + prph_len += sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + } + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } @@ -769,8 +797,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } } - if (prph_len) - iwl_dump_prph(mvm->trans, &dump_data); + if (prph_len) { + iwl_dump_prph(mvm->trans, &dump_data, + iwl_prph_dump_addr_comm, + ARRAY_SIZE(iwl_prph_dump_addr_comm)); + + if (mvm->cfg->mq_rx_supported) + iwl_dump_prph(mvm->trans, &dump_data, + iwl_prph_dump_addr_9000, + ARRAY_SIZE(iwl_prph_dump_addr_9000)); + } dump_trans_data: fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index b70f4530f960..7057f35cb2e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -535,7 +535,7 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, return true; } - WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC)); + WARN_ON(iwl_phy_db_set_section(phy_db, pkt)); return false; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 456067b2f48d..7aae068c02e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -501,9 +501,11 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_TX_FIFO_VO, 0, wdg_timeout); + if (!iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_TX_FIFO_VO, 0, + wdg_timeout); break; case NL80211_IFTYPE_AP: iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue, @@ -533,13 +535,21 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT, - 0); + if (!iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MAX_TID_COUNT, 0); + break; case NL80211_IFTYPE_AP: iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, IWL_MAX_TID_COUNT, 0); + + if (iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_disable_txq(mvm, + IWL_MVM_DQA_AP_PROBE_RESP_QUEUE, + vif->hw_queue[0], IWL_MAX_TID_COUNT, + 0); /* fall through */ default: /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 5ace468070cb..e5f267b21316 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -229,7 +229,11 @@ void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type); spin_lock_bh(&mvm->refs_lock); - WARN_ON(!mvm->refs[ref_type]--); + if (WARN_ON(!mvm->refs[ref_type])) { + spin_unlock_bh(&mvm->refs_lock); + return; + } + mvm->refs[ref_type]--; spin_unlock_bh(&mvm->refs_lock); 
iwl_trans_unref(mvm->trans); } @@ -439,11 +443,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); + if (iwl_mvm_has_new_rx_api(mvm)) + ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); + + if (mvm->trans->num_rx_queues > 1) + ieee80211_hw_set(hw, USES_RSS); if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; - hw->queues = mvm->first_agg_queue; + if (!iwl_mvm_is_dqa_supported(mvm)) + hw->queues = mvm->first_agg_queue; + else + hw->queues = IEEE80211_MAX_QUEUES; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; @@ -848,6 +860,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, u16 *ssn = ¶ms->ssn; u8 buf_size = params->buf_size; bool amsdu = params->amsdu; + u16 timeout = params->timeout; IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", sta->addr, tid, action); @@ -888,10 +901,12 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; break; } - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size); + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, + timeout); break; case IEEE80211_AMPDU_RX_STOP: - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size); + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, + timeout); break; case IEEE80211_AMPDU_TX_START: if (!iwl_enable_tx_ampdu(mvm->cfg)) { @@ -4037,6 +4052,55 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, } } +void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, + struct iwl_mvm_internal_rxq_notif *notif, + u32 size) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq); + u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (!iwl_mvm_has_new_rx_api(mvm)) + return; + + notif->cookie = mvm->queue_sync_cookie; + + if (notif->sync) + atomic_set(&mvm->queue_sync_counter, + mvm->trans->num_rx_queues); + + ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size); + if (ret) { + IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); + goto out; + } + + if (notif->sync) + ret = wait_event_timeout(notif_waitq, + atomic_read(&mvm->queue_sync_counter) == 0, + HZ); + WARN_ON_ONCE(!ret); + +out: + atomic_set(&mvm->queue_sync_counter, 0); + mvm->queue_sync_cookie++; +} + +static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_internal_rxq_notif data = { + .type = IWL_MVM_RXQ_EMPTY, + .sync = 1, + }; + + mutex_lock(&mvm->mutex); + iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data)); + mutex_unlock(&mvm->mutex); +} + const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .ampdu_action = iwl_mvm_mac_ampdu_action, @@ -4093,6 +4157,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .event_callback = iwl_mvm_mac_event_callback, + .sync_rx_queues = iwl_mvm_sync_rx_queues, + CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 85800ba0c667..ffbd41dcc0d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -301,6 +301,8 @@ enum iwl_mvm_ref_type { IWL_MVM_REF_PROTECT_CSA, IWL_MVM_REF_FW_DBG_COLLECT, IWL_MVM_REF_INIT_UCODE, + 
IWL_MVM_REF_SENDING_CMD, + IWL_MVM_REF_RX, /* update debugfs.c when changing this */ @@ -613,6 +615,84 @@ struct iwl_mvm_shared_mem_cfg { u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; }; +/** + * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer + * @head_sn: reorder window head sn + * @num_stored: number of mpdus stored in the buffer + * @buf_size: the reorder buffer size as set by the last addba request + * @sta_id: sta id of this reorder buffer + * @queue: queue of this reorder buffer + * @last_amsdu: track last ASMDU SN for duplication detection + * @last_sub_index: track ASMDU sub frame index for duplication detection + * @entries: list of skbs stored + * @reorder_time: time the packet was stored in the reorder buffer + * @reorder_timer: timer for frames are in the reorder buffer. For AMSDU + * it is the time of last received sub-frame + * @removed: prevent timer re-arming + * @lock: protect reorder buffer internal state + * @mvm: mvm pointer, needed for frame timer context + */ +struct iwl_mvm_reorder_buffer { + u16 head_sn; + u16 num_stored; + u8 buf_size; + u8 sta_id; + int queue; + u16 last_amsdu; + u8 last_sub_index; + struct sk_buff_head entries[IEEE80211_MAX_AMPDU_BUF]; + unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF]; + struct timer_list reorder_timer; + bool removed; + spinlock_t lock; + struct iwl_mvm *mvm; +} ____cacheline_aligned_in_smp; + +/** + * struct iwl_mvm_baid_data - BA session data + * @sta_id: station id + * @tid: tid of the session + * @baid baid of the session + * @timeout: the timeout set in the addba request + * @last_rx: last rx jiffies, updated only if timeout passed from last update + * @session_timer: timer to check if BA session expired, runs at 2 * timeout + * @mvm: mvm pointer, needed for timer context + * @reorder_buf: reorder buffer, allocated per queue + */ +struct iwl_mvm_baid_data { + struct rcu_head rcu_head; + u8 sta_id; + u8 tid; + u8 baid; + u16 timeout; + unsigned long last_rx; + struct timer_list session_timer; + struct iwl_mvm *mvm; + struct iwl_mvm_reorder_buffer reorder_buf[]; +}; + +/* + * enum iwl_mvm_queue_status - queue status + * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved + * Basically, this means that this queue can be used for any purpose + * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use + * This is the state of a queue that has been dedicated for some RATID + * (agg'd or not), but that hasn't yet gone through the actual enablement + * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet. + * Note that in this state there is no requirement to already know what TID + * should be used with this queue, it is just marked as a queue that will + * be used, and shouldn't be allocated to anyone else. + * @IWL_MVM_QUEUE_READY: queue is ready to be used + * This is the state of a queue that has been fully configured (including + * SCD pointers, etc), has a specific RA/TID assigned to it, and can be + * used to send traffic. 
+ */ +enum iwl_mvm_queue_status { + IWL_MVM_QUEUE_FREE, + IWL_MVM_QUEUE_RESERVED, + IWL_MVM_QUEUE_READY, +}; + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -633,6 +713,8 @@ struct iwl_mvm { unsigned long status; + u32 queue_sync_cookie; + atomic_t queue_sync_counter; /* * for beacon filtering - * currently only one interface can be supported @@ -666,13 +748,8 @@ struct iwl_mvm { u32 hw_queue_to_mac80211; u8 hw_queue_refcount; u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ - /* - * This is to mark that queue is reserved for a STA but not yet - * allocated. This is needed to make sure we have at least one - * available queue to use when adding a new STA - */ - bool setup_reserved; u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ + enum iwl_mvm_queue_status status; } queue_info[IWL_MAX_HW_QUEUES]; spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ struct work_struct add_stream_wk; /* To add streams to queues */ @@ -920,6 +997,10 @@ struct iwl_mvm { u32 ciphers[6]; struct iwl_mvm_tof_data tof_data; + struct ieee80211_vif *nan_vif; +#define IWL_MAX_BAID 32 + struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID]; + /* * Drop beacons from other APs in AP mode when there are no connected * clients. @@ -1065,7 +1146,8 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CSUM_SUPPORT); + IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) && + !IWL_MVM_HW_CSUM_DISABLE; } static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) @@ -1242,7 +1324,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); -void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, +void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, const u8 *data, u32 count); @@ -1566,6 +1648,10 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq); void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq); +/* Re-configure the SCD for a queue that has already been configured */ +int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, + int tid, int frame_limit, u16 ssn); + /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); @@ -1628,6 +1714,10 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); +void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, + struct iwl_mvm_internal_rxq_notif *notif, + u32 size); +void iwl_mvm_reorder_timer_expired(unsigned long data); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 8bfb8e06a90c..a68054f127fa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -554,8 
+554,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0; mvm->aux_queue = 15; - mvm->first_agg_queue = 16; - mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; + if (!iwl_mvm_is_dqa_supported(mvm)) { + mvm->first_agg_queue = 16; + mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; + } else { + mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE; + mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; + } if (mvm->cfg->base_params->num_of_queues == 16) { mvm->aux_queue = 11; mvm->first_agg_queue = 12; @@ -586,6 +591,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, skb_queue_head_init(&mvm->d0i3_tx); init_waitqueue_head(&mvm->d0i3_exit_waitq); + atomic_set(&mvm->queue_sync_counter, 0); + SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); /* @@ -930,7 +937,7 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode, if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); else if (pkt->hdr.cmd == FRAME_RELEASE) - iwl_mvm_rx_frame_release(mvm, rxb, 0); + iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) iwl_mvm_rx_rx_phy_cmd(mvm, rxb); else @@ -1208,7 +1215,6 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm, struct iwl_d0i3_iter_data *iter_data) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_sta *ap_sta; struct iwl_mvm_sta *mvmsta; u32 available_tids = 0; u8 tid; @@ -1217,11 +1223,10 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm, mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) return false; - ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); - if (IS_ERR_OR_NULL(ap_sta)) + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); + if (!mvmsta) return false; - mvmsta = iwl_mvm_sta_from_mac80211(ap_sta); spin_lock_bh(&mvmsta->lock); for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; @@ -1632,7 +1637,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, struct iwl_rx_packet *pkt = rxb_addr(rxb); if (unlikely(pkt->hdr.cmd == FRAME_RELEASE)) - iwl_mvm_rx_frame_release(mvm, rxb, queue); + iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION && pkt->hdr.group_id == DATA_PATH_GROUP)) iwl_mvm_rx_queue_notif(mvm, rxb, queue); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 263e8a8576b7..ab7f7eda9c13 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -97,6 +97,7 @@ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) * Adds the rxb to a new skb and give it to mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, struct napi_struct *napi, struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, @@ -131,7 +132,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, fraglen, rxb->truesize); } - ieee80211_rx_napi(mvm->hw, NULL, skb, napi); + ieee80211_rx_napi(mvm->hw, sta, skb, napi); } /* @@ -271,6 +272,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, u32 rate_n_flags; u32 rx_pkt_status; u8 crypt_len = 0; + bool take_ref; phy_info = &mvm->last_phy_info; rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; @@ -453,8 +455,26 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct 
napi_struct *napi, mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND; - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status, - crypt_len, rxb); + if (unlikely(ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control))) + rx_status->boottime_ns = ktime_get_boot_ns(); + + /* Take a reference briefly to kick off a d0i3 entry delay so + * we can handle bursts of RX packets without toggling the + * state too often. But don't do this for beacons if we are + * going to idle because the beacon filtering changes we make + * cause the firmware to send us collateral beacons. */ + take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) && + ieee80211_is_beacon(hdr->frame_control)); + + if (take_ref) + iwl_mvm_ref(mvm, IWL_MVM_REF_RX); + + iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len, + ampdu_status, crypt_len, rxb); + + if (take_ref) + iwl_mvm_unref(mvm, IWL_MVM_REF_RX); } static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 651604d18a32..ac2c5718e454 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -210,7 +210,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, if (iwl_mvm_check_pn(mvm, skb, queue, sta)) kfree_skb(skb); else - ieee80211_rx_napi(mvm->hw, NULL, skb, napi); + ieee80211_rx_napi(mvm->hw, sta, skb, napi); } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, @@ -395,6 +395,150 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, return ret; } +/* + * Returns true if sn2 - buffer_size < sn1 < sn2. + * To be used only in order to compare reorder buffer head with NSSN. + * We fully trust NSSN unless it is behind us due to reorder timeout. + * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. + */ +static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) +{ + return ieee80211_sn_less(sn1, sn2) && + !ieee80211_sn_less(sn1, sn2 - buffer_size); +} + +#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) + +static void iwl_mvm_release_frames(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct napi_struct *napi, + struct iwl_mvm_reorder_buffer *reorder_buf, + u16 nssn) +{ + u16 ssn = reorder_buf->head_sn; + + lockdep_assert_held(&reorder_buf->lock); + + /* ignore nssn smaller than head sn - this can happen due to timeout */ + if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) + return; + + while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { + int index = ssn % reorder_buf->buf_size; + struct sk_buff_head *skb_list = &reorder_buf->entries[index]; + struct sk_buff *skb; + + ssn = ieee80211_sn_inc(ssn); + + /* holes are valid since nssn indicates frames were received. */ + if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list)) + continue; + /* Empty the list. 
Will have more than one frame for A-MSDU */ + while ((skb = __skb_dequeue(skb_list))) { + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, + reorder_buf->queue, + sta); + reorder_buf->num_stored--; + } + } + reorder_buf->head_sn = nssn; + + if (reorder_buf->num_stored && !reorder_buf->removed) { + u16 index = reorder_buf->head_sn % reorder_buf->buf_size; + + while (!skb_peek_tail(&reorder_buf->entries[index])) + index = (index + 1) % reorder_buf->buf_size; + /* modify timer to match next frame's expiration time */ + mod_timer(&reorder_buf->reorder_timer, + reorder_buf->reorder_time[index] + 1 + + RX_REORDER_BUF_TIMEOUT_MQ); + } else { + del_timer(&reorder_buf->reorder_timer); + } +} + +void iwl_mvm_reorder_timer_expired(unsigned long data) +{ + struct iwl_mvm_reorder_buffer *buf = (void *)data; + int i; + u16 sn = 0, index = 0; + bool expired = false; + + spin_lock_bh(&buf->lock); + + if (!buf->num_stored || buf->removed) { + spin_unlock_bh(&buf->lock); + return; + } + + for (i = 0; i < buf->buf_size ; i++) { + index = (buf->head_sn + i) % buf->buf_size; + + if (!skb_peek_tail(&buf->entries[index])) + continue; + if (!time_after(jiffies, buf->reorder_time[index] + + RX_REORDER_BUF_TIMEOUT_MQ)) + break; + expired = true; + sn = ieee80211_sn_add(buf->head_sn, i + 1); + } + + if (expired) { + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]); + /* SN is set to the last expired frame + 1 */ + iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn); + rcu_read_unlock(); + } else if (buf->num_stored) { + /* + * If no frame expired and there are stored frames, index is now + * pointing to the first unexpired frame - modify timer + * accordingly to this frame. + */ + mod_timer(&buf->reorder_timer, + buf->reorder_time[index] + + 1 + RX_REORDER_BUF_TIMEOUT_MQ); + } + spin_unlock_bh(&buf->lock); +} + +static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, + struct iwl_mvm_delba_data *data) +{ + struct iwl_mvm_baid_data *ba_data; + struct ieee80211_sta *sta; + struct iwl_mvm_reorder_buffer *reorder_buf; + u8 baid = data->baid; + + if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID)) + return; + + rcu_read_lock(); + + ba_data = rcu_dereference(mvm->baid_map[baid]); + if (WARN_ON_ONCE(!ba_data)) + goto out; + + sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + goto out; + + reorder_buf = &ba_data->reorder_buf[queue]; + + /* release all frames that are in the reorder buffer to the stack */ + spin_lock_bh(&reorder_buf->lock); + iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf, + ieee80211_sn_add(reorder_buf->head_sn, + reorder_buf->buf_size)); + spin_unlock_bh(&reorder_buf->lock); + del_timer_sync(&reorder_buf->reorder_timer); + +out: + rcu_read_unlock(); +} + void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int queue) { @@ -405,15 +549,182 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, notif = (void *)pkt->data; internal_notif = (void *)notif->payload; + if (internal_notif->sync) { + if (mvm->queue_sync_cookie != internal_notif->cookie) { + WARN_ONCE(1, + "Received expired RX queue sync message\n"); + return; + } + atomic_dec(&mvm->queue_sync_counter); + } + switch (internal_notif->type) { + case IWL_MVM_RXQ_EMPTY: + break; case IWL_MVM_RXQ_NOTIF_DEL_BA: - /* TODO */ + iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); break; default: WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); } } +/* + * 
Returns true if the MPDU was buffered\dropped, false if it should be passed + * to upper layer. + */ +static bool iwl_mvm_reorder(struct iwl_mvm *mvm, + struct napi_struct *napi, + int queue, + struct ieee80211_sta *sta, + struct sk_buff *skb, + struct iwl_rx_mpdu_desc *desc) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_baid_data *baid_data; + struct iwl_mvm_reorder_buffer *buffer; + struct sk_buff *tail; + u32 reorder = le32_to_cpu(desc->reorder_data); + bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; + u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + u8 sub_frame_idx = desc->amsdu_info & + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + int index; + u16 nssn, sn; + u8 baid; + + baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> + IWL_RX_MPDU_REORDER_BAID_SHIFT; + + if (baid == IWL_RX_REORDER_DATA_INVALID_BAID) + return false; + + /* no sta yet */ + if (WARN_ON(IS_ERR_OR_NULL(sta))) + return false; + + /* not a data packet */ + if (!ieee80211_is_data_qos(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) + return false; + + if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) + return false; + + baid_data = rcu_dereference(mvm->baid_map[baid]); + if (WARN(!baid_data, + "Received baid %d, but no data exists for this BAID\n", baid)) + return false; + if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id, + "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n", + baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id, + tid)) + return false; + + nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK; + sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >> + IWL_RX_MPDU_REORDER_SN_SHIFT; + + buffer = &baid_data->reorder_buf[queue]; + + spin_lock_bh(&buffer->lock); + + /* + * If there was a significant jump in the nssn - adjust. + * If the SN is smaller than the NSSN it might need to first go into + * the reorder buffer, in which case we just release up to it and the + * rest of the function will take of storing it and releasing up to the + * nssn + */ + if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, + buffer->buf_size)) { + u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; + + iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn); + } + + /* drop any oudated packets */ + if (ieee80211_sn_less(sn, buffer->head_sn)) + goto drop; + + /* release immediately if allowed by nssn and no stored frames */ + if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { + if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, + buffer->buf_size)) + buffer->head_sn = nssn; + /* No need to update AMSDU last SN - we are moving the head */ + spin_unlock_bh(&buffer->lock); + return false; + } + + index = sn % buffer->buf_size; + + /* + * Check if we already stored this frame + * As AMSDU is either received or not as whole, logic is simple: + * If we have frames in that position in the buffer and the last frame + * originated from AMSDU had a different SN then it is a retransmission. + * If it is the same SN then if the subframe index is incrementing it + * is the same AMSDU - otherwise it is a retransmission. 
+ */ + tail = skb_peek_tail(&buffer->entries[index]); + if (tail && !amsdu) + goto drop; + else if (tail && (sn != buffer->last_amsdu || + buffer->last_sub_index >= sub_frame_idx)) + goto drop; + + /* put in reorder buffer */ + __skb_queue_tail(&buffer->entries[index], skb); + buffer->num_stored++; + buffer->reorder_time[index] = jiffies; + + if (amsdu) { + buffer->last_amsdu = sn; + buffer->last_sub_index = sub_frame_idx; + } + + iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn); + spin_unlock_bh(&buffer->lock); + return true; + +drop: + kfree_skb(skb); + spin_unlock_bh(&buffer->lock); + return true; +} + +static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid) +{ + unsigned long now = jiffies; + unsigned long timeout; + struct iwl_mvm_baid_data *data; + + rcu_read_lock(); + + data = rcu_dereference(mvm->baid_map[baid]); + if (WARN_ON(!data)) + goto out; + + if (!data->timeout) + goto out; + + timeout = data->timeout; + /* + * Do not update last rx all the time to avoid cache bouncing + * between the rx queues. + * Update it every timeout. Worst case is the session will + * expire after ~ 2 * timeout, which doesn't matter that much. + */ + if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now)) + /* Update is atomic */ + data->last_rx = now; + +out: + rcu_read_unlock(); +} + void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { @@ -484,6 +795,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + u8 baid = (u8)((le32_to_cpu(desc->reorder_data) & + IWL_RX_MPDU_REORDER_BAID_MASK) >> + IWL_RX_MPDU_REORDER_BAID_SHIFT); /* * We have tx blocked stations (with CS bit). If we heard @@ -536,6 +850,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; } + if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) + iwl_mvm_agg_rx_received(mvm, baid); } /* @@ -593,12 +909,42 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, /* TODO: PHY info - gscan */ iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); + if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); rcu_read_unlock(); } -void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, +void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { - /* TODO */ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_frame_release *release = (void *)pkt->data; + struct ieee80211_sta *sta; + struct iwl_mvm_reorder_buffer *reorder_buf; + struct iwl_mvm_baid_data *ba_data; + + int baid = release->baid; + + if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) + return; + + rcu_read_lock(); + + ba_data = rcu_dereference(mvm->baid_map[baid]); + if (WARN_ON_ONCE(!ba_data)) + goto out; + + sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + goto out; + + reorder_buf = &ba_data->reorder_buf[queue]; + + spin_lock_bh(&reorder_buf->lock); + iwl_mvm_release_frames(mvm, sta, napi, reorder_buf, + le16_to_cpu(release->nssn)); + spin_unlock_bh(&reorder_buf->lock); + +out: + rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 12614b7b7fe7..fea4d3437e2f 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -223,6 +223,39 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return ret; } +static void iwl_mvm_rx_agg_session_expired(unsigned long data) +{ + struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data; + struct iwl_mvm_baid_data *ba_data; + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvm_sta; + unsigned long timeout; + + rcu_read_lock(); + + ba_data = rcu_dereference(*rcu_ptr); + + if (WARN_ON(!ba_data)) + goto unlock; + + if (!ba_data->timeout) + goto unlock; + + timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2); + if (time_is_after_jiffies(timeout)) { + mod_timer(&ba_data->session_timer, timeout); + goto unlock; + } + + /* Timer expired */ + sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + ieee80211_stop_rx_ba_session_offl(mvm_sta->vif, + sta->addr, ba_data->tid); +unlock: + rcu_read_unlock(); +} + static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { @@ -293,6 +326,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, u8 mac_queue = mvmsta->vif->hw_queue[ac]; int queue = -1; int ssn; + int ret; lockdep_assert_held(&mvm->mutex); @@ -321,8 +355,15 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, if (queue < 0) queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); + + /* + * Mark TXQ as ready, even though it hasn't been fully configured yet, + * to make sure no one else takes it. + * This will allow avoiding re-acquiring the lock at the end of the + * configuration. On error we'll mark it back as free. + */ if (queue >= 0) - mvm->queue_info[queue].setup_reserved = false; + mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; spin_unlock_bh(&mvm->queue_info_lock); @@ -354,7 +395,16 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; spin_unlock_bh(&mvmsta->lock); - return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); + ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); + if (ret) + goto out_err; + + return 0; + +out_err: + iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0); + + return ret; } static inline u8 iwl_mvm_tid_to_ac_queue(int tid) @@ -460,7 +510,8 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, /* Make sure we have free resources for this STA */ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount && - !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].setup_reserved) + (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == + IWL_MVM_QUEUE_FREE)) queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; else queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, @@ -470,7 +521,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, IWL_ERR(mvm, "No available queues for new station\n"); return -ENOSPC; } - mvm->queue_info[queue].setup_reserved = true; + mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; spin_unlock_bh(&mvm->queue_info_lock); @@ -1000,6 +1051,33 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_is_dqa_supported(mvm)) { + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = IWL_MVM_TX_FIFO_VO, + .sta_id = mvmvif->bcast_sta.sta_id, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; + unsigned 
int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + int queue; + + if ((vif->type == NL80211_IFTYPE_AP) && + (mvmvif->bcast_sta.tfd_queue_msk & + BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE))) + queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) && + (mvmvif->bcast_sta.tfd_queue_msk & + BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE))) + queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; + else if (WARN(1, "Missed required TXQ for adding bcast STA\n")) + return -EINVAL; + + iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg, + wdg_timeout); + } + if (vif->type == NL80211_IFTYPE_ADHOC) baddr = vif->bss_conf.bssid; @@ -1028,20 +1106,28 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 qmask; + u32 qmask = 0; lockdep_assert_held(&mvm->mutex); - qmask = iwl_mvm_mac_get_queues_mask(vif); + if (!iwl_mvm_is_dqa_supported(mvm)) + qmask = iwl_mvm_mac_get_queues_mask(vif); - /* - * The firmware defines the TFD queue mask to only be relevant - * for *unicast* queues, so the multicast (CAB) queue shouldn't - * be included. - */ - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP) { + /* + * The firmware defines the TFD queue mask to only be relevant + * for *unicast* queues, so the multicast (CAB) queue shouldn't + * be included. + */ qmask &= ~BIT(vif->cab_queue); + if (iwl_mvm_is_dqa_supported(mvm)) + qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE); + } else if (iwl_mvm_is_dqa_supported(mvm) && + vif->type == NL80211_IFTYPE_P2P_DEVICE) { + qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE); + } + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, ieee80211_vif_type_p2p(vif)); } @@ -1099,11 +1185,92 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) #define IWL_MAX_RX_BA_SESSIONS 16 +static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) +{ + struct iwl_mvm_delba_notif notif = { + .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, + .metadata.sync = 1, + .delba.baid = baid, + }; + iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); +}; + +static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, + struct iwl_mvm_baid_data *data) +{ + int i; + + iwl_mvm_sync_rxq_del_ba(mvm, data->baid); + + for (i = 0; i < mvm->trans->num_rx_queues; i++) { + int j; + struct iwl_mvm_reorder_buffer *reorder_buf = + &data->reorder_buf[i]; + + spin_lock_bh(&reorder_buf->lock); + if (likely(!reorder_buf->num_stored)) { + spin_unlock_bh(&reorder_buf->lock); + continue; + } + + /* + * This shouldn't happen in regular DELBA since the internal + * delBA notification should trigger a release of all frames in + * the reorder buffer. + */ + WARN_ON(1); + + for (j = 0; j < reorder_buf->buf_size; j++) + __skb_queue_purge(&reorder_buf->entries[j]); + /* + * Prevent timer re-arm. This prevents a very far fetched case + * where we timed out on the notification. There may be prior + * RX frames pending in the RX queue before the notification + * that might get processed between now and the actual deletion + * and we would re-arm the timer although we are deleting the + * reorder buffer. 
+ */ + reorder_buf->removed = true; + spin_unlock_bh(&reorder_buf->lock); + del_timer_sync(&reorder_buf->reorder_timer); + } +} + +static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, + u32 sta_id, + struct iwl_mvm_baid_data *data, + u16 ssn, u8 buf_size) +{ + int i; + + for (i = 0; i < mvm->trans->num_rx_queues; i++) { + struct iwl_mvm_reorder_buffer *reorder_buf = + &data->reorder_buf[i]; + int j; + + reorder_buf->num_stored = 0; + reorder_buf->head_sn = ssn; + reorder_buf->buf_size = buf_size; + /* rx reorder timer */ + reorder_buf->reorder_timer.function = + iwl_mvm_reorder_timer_expired; + reorder_buf->reorder_timer.data = (unsigned long)reorder_buf; + init_timer(&reorder_buf->reorder_timer); + spin_lock_init(&reorder_buf->lock); + reorder_buf->mvm = mvm; + reorder_buf->queue = i; + reorder_buf->sta_id = sta_id; + for (j = 0; j < reorder_buf->buf_size; j++) + __skb_queue_head_init(&reorder_buf->entries[j]); + } +} + int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start, u8 buf_size) + int tid, u16 ssn, bool start, u8 buf_size, u16 timeout) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; + struct iwl_mvm_baid_data *baid_data = NULL; int ret; u32 status; @@ -1114,6 +1281,19 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return -ENOSPC; } + if (iwl_mvm_has_new_rx_api(mvm) && start) { + /* + * Allocate here so if allocation fails we can bail out early + * before starting the BA session in the firmware + */ + baid_data = kzalloc(sizeof(*baid_data) + + mvm->trans->num_rx_queues * + sizeof(baid_data->reorder_buf[0]), + GFP_KERNEL); + if (!baid_data) + return -ENOMEM; + } + cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); cmd.sta_id = mvm_sta->sta_id; cmd.add_modify = STA_MODE_MODIFY; @@ -1132,7 +1312,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) - return ret; + goto out_free; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: @@ -1150,14 +1330,75 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, break; } - if (!ret) { - if (start) - mvm->rx_ba_sessions++; - else if (mvm->rx_ba_sessions > 0) - /* check that restart flow didn't zero the counter */ - mvm->rx_ba_sessions--; + if (ret) + goto out_free; + + if (start) { + u8 baid; + + mvm->rx_ba_sessions++; + + if (!iwl_mvm_has_new_rx_api(mvm)) + return 0; + + if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) { + ret = -EINVAL; + goto out_free; + } + baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >> + IWL_ADD_STA_BAID_SHIFT); + baid_data->baid = baid; + baid_data->timeout = timeout; + baid_data->last_rx = jiffies; + init_timer(&baid_data->session_timer); + baid_data->session_timer.function = + iwl_mvm_rx_agg_session_expired; + baid_data->session_timer.data = + (unsigned long)&mvm->baid_map[baid]; + baid_data->mvm = mvm; + baid_data->tid = tid; + baid_data->sta_id = mvm_sta->sta_id; + + mvm_sta->tid_to_baid[tid] = baid; + if (timeout) + mod_timer(&baid_data->session_timer, + TU_TO_EXP_TIME(timeout * 2)); + + iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id, + baid_data, ssn, buf_size); + /* + * protect the BA data with RCU to cover a case where our + * internal RX sync mechanism will timeout (not that it's + * supposed to happen) and we will free the session data while + * RX is being processed in parallel + */ + WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); + 
rcu_assign_pointer(mvm->baid_map[baid], baid_data); + } else if (mvm->rx_ba_sessions > 0) { + u8 baid = mvm_sta->tid_to_baid[tid]; + + /* check that restart flow didn't zero the counter */ + mvm->rx_ba_sessions--; + if (!iwl_mvm_has_new_rx_api(mvm)) + return 0; + + if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) + return -EINVAL; + + baid_data = rcu_access_pointer(mvm->baid_map[baid]); + if (WARN_ON(!baid_data)) + return -EINVAL; + + /* synchronize all rx queues so we can safely delete */ + iwl_mvm_free_reorder(mvm, baid_data); + del_timer_sync(&baid_data->session_timer); + RCU_INIT_POINTER(mvm->baid_map[baid], NULL); + kfree_rcu(baid_data, rcu_head); } + return 0; +out_free: + kfree(baid_data); return ret; } @@ -1175,7 +1416,9 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, mvm_sta->tfd_queue_msk |= BIT(queue); mvm_sta->tid_disable_agg &= ~BIT(tid); } else { - mvm_sta->tfd_queue_msk &= ~BIT(queue); + /* In DQA-mode the queue isn't removed on agg termination */ + if (!iwl_mvm_is_dqa_supported(mvm)) + mvm_sta->tfd_queue_msk &= ~BIT(queue); mvm_sta->tid_disable_agg |= BIT(tid); } @@ -1258,17 +1501,35 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, spin_lock_bh(&mvm->queue_info_lock); - txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue, - mvm->last_agg_queue); - if (txq_id < 0) { - ret = txq_id; - spin_unlock_bh(&mvm->queue_info_lock); - IWL_ERR(mvm, "Failed to allocate agg queue\n"); - goto release_locks; + /* + * Note the possible cases: + * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed + * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free + * one and mark it as reserved + * 3. In DQA mode, but no traffic yet on this TID: same treatment as in + * non-DQA mode, since the TXQ hasn't yet been allocated + */ + txq_id = mvmsta->tid_data[tid].txq_id; + if (!iwl_mvm_is_dqa_supported(mvm) || + mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { + txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue, + mvm->last_agg_queue); + if (txq_id < 0) { + ret = txq_id; + spin_unlock_bh(&mvm->queue_info_lock); + IWL_ERR(mvm, "Failed to allocate agg queue\n"); + goto release_locks; + } + + /* TXQ hasn't yet been enabled, so mark it only as reserved */ + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; } - mvm->queue_info[txq_id].setup_reserved = true; spin_unlock_bh(&mvm->queue_info_lock); + IWL_DEBUG_TX_QUEUES(mvm, + "AGG for tid %d will be on queue #%d\n", + tid, txq_id); + tid_data = &mvmsta->tid_data[tid]; tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->txq_id = txq_id; @@ -1303,6 +1564,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); int queue, ret; + bool alloc_queue = true; u16 ssn; struct iwl_trans_txq_scd_cfg cfg = { @@ -1328,8 +1590,46 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; - iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]], - ssn, &cfg, wdg_timeout); + /* In DQA mode, the existing queue might need to be reconfigured */ + if (iwl_mvm_is_dqa_supported(mvm)) { + spin_lock_bh(&mvm->queue_info_lock); + /* Maybe there is no need to even alloc a queue... 
*/
+		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
+			alloc_queue = false;
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		/*
+		 * Only reconfig the SCD for the queue if the window size has
+		 * changed from the current one (i.e. become smaller)
+		 */
+		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
+			/*
+			 * If reconfiguring an existing queue, it first must be
+			 * drained
+			 */
+			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
+							    BIT(queue));
+			if (ret) {
+				IWL_ERR(mvm,
+					"Error draining queue before reconfig\n");
+				return ret;
+			}
+
+			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
+						   mvmsta->sta_id, tid,
+						   buf_size, ssn);
+			if (ret) {
+				IWL_ERR(mvm,
+					"Error reconfiguring TXQ #%d\n", queue);
+				return ret;
+			}
+		}
+	}
+
+	if (alloc_queue)
+		iwl_mvm_enable_txq(mvm, queue,
+				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
+				   &cfg, wdg_timeout);
 
 	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
 	if (ret)
@@ -1337,7 +1637,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	/* No need to mark as reserved */
 	spin_lock_bh(&mvm->queue_info_lock);
-	mvm->queue_info[queue].setup_reserved = false;
+	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	/*
@@ -1384,9 +1684,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	mvmsta->agg_tids &= ~BIT(tid);
 
-	/* No need to mark as reserved anymore */
 	spin_lock_bh(&mvm->queue_info_lock);
-	mvm->queue_info[txq_id].setup_reserved = false;
+	/*
+	 * The TXQ is marked as reserved only if no traffic came through yet.
+	 * This means no traffic has been sent on this TID (agg'd or not), so
+	 * we no longer have use for the queue. Since it hasn't even been
+	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
+	 * free.
+	 */
+	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	switch (tid_data->state) {
@@ -1412,9 +1719,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-		iwl_mvm_disable_txq(mvm, txq_id,
-				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
-				    0);
+		if (!iwl_mvm_is_dqa_supported(mvm)) {
+			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
+		}
 		return 0;
 	case IWL_AGG_STARTING:
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
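Taken together, these agg-start/stop hunks replace the old setup_reserved flag with a three-state lifecycle: iwl_mvm_sta_tx_agg_start() moves a FREE queue to RESERVED, iwl_mvm_sta_tx_agg_oper() moves it to READY once iwl_mvm_enable_txq() runs, and tearing down a session that never carried traffic drops it straight back to FREE. A minimal user-space model of that lifecycle (only the IWL_MVM_QUEUE_* names come from the patch; the struct, helpers and refcount here are a simplified stand-in, not driver code):

#include <assert.h>
#include <stdio.h>

/* Queue states as named in the patch; everything else is a toy model. */
enum iwl_mvm_queue_status {
	IWL_MVM_QUEUE_FREE,	/* no user; can be handed out */
	IWL_MVM_QUEUE_RESERVED,	/* picked by agg-start, not enabled yet */
	IWL_MVM_QUEUE_READY,	/* enabled and carrying traffic */
};

struct queue_model {
	enum iwl_mvm_queue_status status;
	int refcount;		/* stands in for hw_queue_refcount */
};

/* agg-start: reserve a free queue, or fail as the driver does (-ENOSPC) */
static int reserve_queue(struct queue_model *q)
{
	if (q->status != IWL_MVM_QUEUE_FREE)
		return -1;
	q->status = IWL_MVM_QUEUE_RESERVED;
	return 0;
}

/* agg-oper: the queue is actually enabled and the reservation consumed */
static void enable_queue(struct queue_model *q)
{
	q->refcount++;
	q->status = IWL_MVM_QUEUE_READY;
}

/* agg-stop before any traffic: RESERVED drops straight back to FREE */
static void release_reservation(struct queue_model *q)
{
	if (q->status == IWL_MVM_QUEUE_RESERVED)
		q->status = IWL_MVM_QUEUE_FREE;
}

/* disable: the last reference frees the queue again */
static void disable_queue(struct queue_model *q)
{
	if (--q->refcount == 0)
		q->status = IWL_MVM_QUEUE_FREE;
}

int main(void)
{
	struct queue_model q = { IWL_MVM_QUEUE_FREE, 0 };

	assert(reserve_queue(&q) == 0);
	release_reservation(&q);	/* session torn down early */
	assert(q.status == IWL_MVM_QUEUE_FREE);

	assert(reserve_queue(&q) == 0);
	enable_queue(&q);		/* traffic flows, queue is READY */
	disable_queue(&q);
	assert(q.status == IWL_MVM_QUEUE_FREE);
	puts("queue lifecycle ok");
	return 0;
}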
@@ -1465,9 +1774,16 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	mvmsta->agg_tids &= ~BIT(tid);
 	spin_unlock_bh(&mvmsta->lock);
 
-	/* No need to mark as reserved */
 	spin_lock_bh(&mvm->queue_info_lock);
-	mvm->queue_info[txq_id].setup_reserved = false;
+	/*
+	 * The TXQ is marked as reserved only if no traffic came through yet.
+	 * This means no traffic has been sent on this TID (agg'd or not), so
+	 * we no longer have use for the queue. Since it hasn't even been
+	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
+	 * free.
+	 */
+	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	if (old_state >= IWL_AGG_ON) {
@@ -1480,9 +1796,12 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-		iwl_mvm_disable_txq(mvm, tid_data->txq_id,
-				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
-				    0);
+		if (!iwl_mvm_is_dqa_supported(mvm)) {
+			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
+					    tid, 0);
+		}
 	}
 
 	return 0;
@@ -1533,17 +1852,12 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
 	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
 		u8 sta_id = mvmvif->ap_sta_id;
 
-		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
-					    lockdep_is_held(&mvm->mutex));
 		/*
 		 * It is possible that the 'sta' parameter is NULL,
 		 * for example when a GTK is removed - the sta_id will then
 		 * be the AP ID, and no station was passed by mac80211.
 		 */
-		if (IS_ERR_OR_NULL(sta))
-			return NULL;
-
-		return iwl_mvm_sta_from_mac80211(sta);
+		return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
 	}
 
 	return NULL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index e3efdcd900f0..d2c58f134fcf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -348,6 +348,15 @@ struct iwl_mvm_key_pn {
 	} ____cacheline_aligned_in_smp q[];
 };
 
+struct iwl_mvm_delba_data {
+	u32 baid;
+} __packed;
+
+struct iwl_mvm_delba_notif {
+	struct iwl_mvm_internal_rxq_notif metadata;
+	struct iwl_mvm_delba_data delba;
+} __packed;
+
 /**
  * struct iwl_mvm_rxq_dup_data - per station per rx queue data
  * @last_seq: last sequence per tid for duplicate packet detection
@@ -373,6 +382,7 @@ struct iwl_mvm_rxq_dup_data {
  * @lock: lock to protect the whole struct. Since %tid_data is accessed from Tx
  * and from Tx response flow, it needs a spinlock.
  * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+ * @tid_to_baid: a simple map of TID to baid
 * @reserved_queue: the queue reserved for this STA for DQA purposes
 *	Every STA is given one reserved queue to allow it to operate. If no
 *	such queue can be guaranteed, the STA addition will fail.
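The tid_to_baid map documented above pairs with the baid_map filled in by iwl_mvm_sta_rx_agg(): Tx and teardown paths look a BA session up by TID, while the Rx path uses the BAID reported by the firmware. A toy sketch of that double index (array sizes, INVALID_BAID value and helper names are invented for illustration; the driver additionally protects baid_map with RCU and frees sessions via kfree_rcu after syncing the Rx queues):

#include <stdio.h>
#include <stdlib.h>

#define MAX_TID       8		/* matches IWL_MAX_TID_COUNT */
#define MAX_BAID     32		/* illustrative size only */
#define INVALID_BAID 0xff	/* illustrative sentinel */

struct ba_session {
	unsigned char baid;
	unsigned char tid;
	unsigned char sta_id;
};

/* Rx path: BAID -> session (mvm->baid_map; RCU-protected in the driver) */
static struct ba_session *baid_map[MAX_BAID];
/* Tx/teardown path: TID -> BAID (mvm_sta->tid_to_baid in the patch) */
static unsigned char tid_to_baid[MAX_TID];

static int start_ba(unsigned char baid, unsigned char tid, unsigned char sta)
{
	struct ba_session *s = calloc(1, sizeof(*s));

	if (!s)
		return -1;
	s->baid = baid;
	s->tid = tid;
	s->sta_id = sta;
	baid_map[baid] = s;	/* rcu_assign_pointer() in the driver */
	tid_to_baid[tid] = baid;
	return 0;
}

static void stop_ba(unsigned char tid)
{
	unsigned char baid = tid_to_baid[tid];
	struct ba_session *s = baid_map[baid];

	baid_map[baid] = NULL;	/* RCU_INIT_POINTER(..., NULL) there */
	free(s);		/* kfree_rcu() after the Rx queues sync */
	tid_to_baid[tid] = INVALID_BAID;
}

int main(void)
{
	if (start_ba(3, 0, 1) == 0) {
		printf("tid 0 -> baid %u\n", (unsigned)tid_to_baid[0]);
		stop_ba(0);
	}
	return 0;
}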
@@ -406,6 +416,7 @@ struct iwl_mvm_sta { bool next_status_eosp; spinlock_t lock; struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1]; + u8 tid_to_baid[IWL_MAX_TID_COUNT]; struct iwl_lq_sta lq_sta; struct ieee80211_vif *vif; struct iwl_mvm_key_pn __rcu *ptk_pn[4]; @@ -487,7 +498,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start, u8 buf_size); + int tid, u16 ssn, bool start, u8 buf_size, u16 timeout); int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index eb3f460ce1b6..58fc7b3c711c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -359,16 +359,14 @@ static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac, static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) { - struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; int i, err; for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(sta)) + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i); + if (!mvmsta) continue; - mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (enable == mvmsta->tt_tx_protection) continue; err = iwl_mvm_tx_protection(mvm, mvmsta, enable); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index c53aa0f220e0..779bafcbc9a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -475,6 +475,21 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, return dev_cmd; } +static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, __le16 fc) +{ + if (iwl_mvm_is_dqa_supported(mvm)) { + if (info->control.vif->type == NL80211_IFTYPE_AP && + ieee80211_is_probe_resp(fc)) + return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + else if (ieee80211_is_mgmt(fc) && + info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE) + return IWL_MVM_DQA_P2P_DEVICE_QUEUE; + } + + return info->hw_queue; +} + int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -484,6 +499,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) struct iwl_tx_cmd *tx_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); + int queue; memcpy(&info, skb->cb, sizeof(info)); @@ -508,6 +524,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) info.control.vif->type == NL80211_IFTYPE_STATION) IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; + queue = info.hw_queue; + /* * If the interface on which the frame is sent is the P2P_DEVICE * or an AP/GO interface use the broadcast station associated @@ -523,10 +541,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) iwl_mvm_vif_from_mac80211(info.control.vif); if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || - info.control.vif->type == NL80211_IFTYPE_AP) + info.control.vif->type == NL80211_IFTYPE_AP) { sta_id = mvmvif->bcast_sta.sta_id; - else if (info.control.vif->type == NL80211_IFTYPE_STATION && - is_multicast_ether_addr(hdr->addr1)) { + queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, + 
hdr->frame_control); + } else if (info.control.vif->type == NL80211_IFTYPE_STATION && + is_multicast_ether_addr(hdr->addr1)) { u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); if (ap_sta_id != IWL_MVM_STATION_COUNT) @@ -534,7 +554,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) } } - IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue); + IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); if (!dev_cmd) @@ -545,7 +565,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdrlen); - if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) { + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); return -1; } @@ -589,9 +609,11 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; + dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); + if (!sta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || - !mvmsta->tlc_amsdu) { + (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) { num_subframes = 1; pad = 0; goto segment; @@ -622,7 +644,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, } max_amsdu_len = sta->max_amsdu_len; - dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); /* the Tx FIFO to which this A-MSDU will be routed */ txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; @@ -636,7 +657,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, max_amsdu_len = min_t(unsigned int, max_amsdu_len, mvm->shared_mem_cfg.txfifo_size[txf] - 256); - if (dbg_max_amsdu_len) + if (unlikely(dbg_max_amsdu_len)) max_amsdu_len = min_t(unsigned int, max_amsdu_len, dbg_max_amsdu_len); @@ -912,7 +933,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - if (txq_id < mvm->first_agg_queue) + /* Increase pending frames count if this isn't AMPDU */ + if (!is_ampdu) atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); return 0; @@ -1160,6 +1182,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, u8 skb_freed = 0; u16 next_reclaimed, seq_ctl; bool is_ndp = false; + bool txq_agg = false; /* Is this TXQ aggregated */ __skb_queue_head_init(&skbs); @@ -1290,6 +1313,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); + txq_agg = (mvmsta->tid_data[tid].state == IWL_AGG_ON); + if (!is_ndp) { tid_data->next_reclaimed = next_reclaimed; IWL_DEBUG_TX_REPLY(mvm, @@ -1345,11 +1370,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, * If the txq is not an AMPDU queue, there is no chance we freed * several skbs. Check that out... 
*/ - if (txq_id >= mvm->first_agg_queue) + if (txq_agg) goto out; /* We can't free more than one frame at once on a shared queue */ - WARN_ON(skb_freed > 1); + WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1)); /* If we have still frames for this STA nothing to do here */ if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) @@ -1443,9 +1468,12 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); u16 sequence = le16_to_cpu(pkt->hdr.sequence); - struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + int queue = SEQ_TO_QUEUE(sequence); - if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue)) + if (WARN_ON_ONCE(queue < mvm->first_agg_queue && + (!iwl_mvm_is_dqa_supported(mvm) || + (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))) return; if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) @@ -1455,10 +1483,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, rcu_read_lock(); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); - if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (!WARN_ON_ONCE(!mvmsta)) { mvmsta->tid_data[tid].rate_n_flags = le32_to_cpu(tx_resp->initial_rate); mvmsta->tid_data[tid].tx_time = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index f0ffd62f02d3..161b99efd63d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -90,11 +90,17 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. 
*/
-	if (!(cmd->flags & CMD_ASYNC))
+	if (!(cmd->flags & CMD_ASYNC)) {
 		lockdep_assert_held(&mvm->mutex);
+		if (!(cmd->flags & CMD_SEND_IN_IDLE))
+			iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
+	}
 
 	ret = iwl_trans_send_cmd(mvm->trans, cmd);
 
+	if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
+		iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);
+
 	/*
 	 * If the caller wants the SKB, then don't hide any problems, the
 	 * caller might access the response buffer which will be NULL if
@@ -581,12 +587,45 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
 
 	for (i = minq; i <= maxq; i++)
 		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
-		    !mvm->queue_info[i].setup_reserved)
+		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
 			return i;
 
 	return -ENOSPC;
 }
 
+int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
+			 int tid, int frame_limit, u16 ssn)
+{
+	struct iwl_scd_txq_cfg_cmd cmd = {
+		.scd_queue = queue,
+		.enable = 1,
+		.window = frame_limit,
+		.sta_id = sta_id,
+		.ssn = cpu_to_le16(ssn),
+		.tx_fifo = fifo,
+		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+		.tid = tid,
+	};
+	int ret;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
+		 "Trying to reconfig unallocated queue %d\n", queue)) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		return -ENXIO;
+	}
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
+		  queue, fifo, ret);
+
+	return ret;
+}
+
 void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
 			unsigned int wdg_timeout)
@@ -682,6 +721,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
 	mvm->queue_info[queue].hw_queue_refcount--;
 	cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
+	if (!cmd.enable)
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
 
 	IWL_DEBUG_TX_QUEUES(mvm,
 			    "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
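The send-command hunk at the top of utils.c brackets every synchronous host command that is not marked CMD_SEND_IN_IDLE with an IWL_MVM_REF_SENDING_CMD reference, so the device cannot drop into d0i3 while the command is in flight. A rough stand-alone model of that pairing (the flag bit values and the plain counter are placeholders for the driver's reference bookkeeping):

#include <assert.h>
#include <stdio.h>

#define CMD_ASYNC        0x1	/* bit values invented for the model */
#define CMD_SEND_IN_IDLE 0x2

static int sending_cmd_refs;	/* stand-in for the SENDING_CMD d0i3 ref */

static int send_cmd(unsigned int flags)
{
	/* sync commands that must not run in idle pin the device awake */
	if (!(flags & CMD_ASYNC) && !(flags & CMD_SEND_IN_IDLE))
		sending_cmd_refs++;		/* iwl_mvm_ref() */

	/* ... iwl_trans_send_cmd() would run here ... */

	if (!(flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
		sending_cmd_refs--;		/* iwl_mvm_unref() */

	return 0;
}

int main(void)
{
	send_cmd(0);			/* sync: reference taken and dropped */
	send_cmd(CMD_ASYNC);		/* async: no reference needed */
	send_cmd(CMD_SEND_IN_IDLE);	/* allowed in idle: no reference */
	assert(sending_cmd_refs == 0);
	puts("references balanced");
	return 0;
}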