Diffstat (limited to 'drivers/net/wireless/ath/ath11k/dp_tx.c')
-rw-r--r--   drivers/net/wireless/ath/ath11k/dp_tx.c   | 572
1 file changed, 435 insertions(+), 137 deletions(-)
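The largest functional change in ath11k_dp_tx() below is that the TCL ring count, the ring selector and the retry policy now come from hw_params (max_tx_ring, get_ring_selector(), tcl_ring_retry) instead of the fixed DP_TCL_NUM_RING_MAX round robin. As a rough illustration of that select-and-retry pattern, here is a minimal standalone C sketch; the ring count, the ring_has_room() stub and the values it returns are made-up stand-ins for illustration, not driver symbols:

/* Standalone illustration of the TCL ring selection/retry pattern used in
 * ath11k_dp_tx(): pick a ring from a rolling selector, remember which rings
 * were already tried in a bitmap, and move on until every ring was visited.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_TX_RING 3	/* assumption: plays the role of hw_params.max_tx_ring */

/* Stub for "does this ring have a free tx descriptor?" */
static bool ring_has_room(int ring_id)
{
	return ring_id == 2;	/* pretend only ring 2 has space */
}

static int pick_tx_ring(unsigned int selector)
{
	unsigned int ring_map = 0;	/* BIT(i) set once ring i was tried */
	int ring_id;

	while (ring_map != (1U << MAX_TX_RING) - 1) {
		ring_id = selector % MAX_TX_RING;
		ring_map |= 1U << ring_id;

		if (ring_has_room(ring_id))
			return ring_id;

		selector++;	/* retry on the next ring, like tcl_ring_sel */
	}

	return -1;	/* all rings full: caller drops the frame (-ENOSPC) */
}

int main(void)
{
	printf("selected ring: %d\n", pick_tx_ring(0));
	return 0;
}

The bitmap mirrors ring_map in the driver: once every ring has been tried and none has a free descriptor, the caller gives up instead of spinning.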
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 41c990aec6b7..562aba66582f 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -1,20 +1,28 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
  */
 
 #include "core.h"
 #include "dp_tx.h"
 #include "debug.h"
+#include "debugfs_sta.h"
 #include "hw.h"
 #include "peer.h"
+#include "mac.h"
 
 static enum hal_tcl_encap_type
 ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ath11k_base *ab = arvif->ar->ab;
 
-	if (tx_info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
+	if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+		return HAL_TCL_ENCAP_TYPE_RAW;
+
+	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
 		return HAL_TCL_ENCAP_TYPE_ETHERNET;
 
 	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
@@ -73,11 +81,11 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
 }
 
 int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
-		 struct sk_buff *skb)
+		 struct ath11k_sta *arsta, struct sk_buff *skb)
 {
 	struct ath11k_base *ab = ar->ab;
 	struct ath11k_dp *dp = &ab->dp;
-	struct hal_tx_info ti = {0};
+	struct hal_tx_info ti = {};
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
 	struct hal_srng *tcl_ring;
@@ -87,30 +95,27 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
 	u8 pool_id;
 	u8 hal_ring_id;
 	int ret;
-	u8 ring_selector = 0, ring_map = 0;
+	u32 ring_selector = 0;
+	u8 ring_map = 0;
 	bool tcl_ring_retry;
 
-	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
 		return -ESHUTDOWN;
 
-	if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
-	    !ieee80211_is_data(hdr->frame_control))
-		return -ENOTSUPP;
+	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+		     !ieee80211_is_data(hdr->frame_control)))
+		return -EOPNOTSUPP;
 
 	pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
 
-	/* Let the default ring selection be based on a round robin
-	 * fashion where one of the 3 tcl rings are selected based on
-	 * the tcl_ring_selector counter. In case that ring
-	 * is full/busy, we resort to other available rings.
-	 * If all rings are full, we drop the packet.
-	 * //TODO Add throttling logic when all rings are full
-	 */
-	ring_selector = atomic_inc_return(&ab->tcl_ring_selector);
+	ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
 
 tcl_ring_sel:
 	tcl_ring_retry = false;
-	ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
+
+	ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
+	ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
+
 	ring_map |= BIT(ti.ring_id);
 
 	tx_ring = &dp->tx_ring[ti.ring_id];
@@ -120,9 +125,12 @@ tcl_ring_sel:
 			DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
 	spin_unlock_bh(&tx_ring->tx_idr_lock);
 
-	if (ret < 0) {
-		if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1))
+	if (unlikely(ret < 0)) {
+		if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
+		    !ab->hw_params.tcl_ring_retry) {
+			atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 			return -ENOSPC;
+		}
 
 		/* Check if the next ring is available */
 		ring_selector++;
@@ -133,13 +141,27 @@ tcl_ring_sel:
 		     FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
 		     FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
 	ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
-	ti.meta_data_flags = arvif->tcl_metadata;
 
-	if (info->control.hw_key)
-		ti.encrypt_type =
-			ath11k_dp_tx_get_encrypt_type(info->control.hw_key->cipher);
-	else
-		ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+	if (ieee80211_has_a4(hdr->frame_control) &&
+	    is_multicast_ether_addr(hdr->addr3) && arsta &&
+	    arsta->use_4addr_set) {
+		ti.meta_data_flags = arsta->tcl_metadata;
+		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
+	} else {
+		ti.meta_data_flags = arvif->tcl_metadata;
+	}
+
+	if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
+		if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
+			ti.encrypt_type =
+				ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
+
+			if (ieee80211_has_protected(hdr->frame_control))
+				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+		} else {
+			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+		}
+	}
 
 	ti.addr_search_flags = arvif->hal_addr_search_flags;
 	ti.search_type = arvif->search_type;
@@ -147,9 +169,11 @@ tcl_ring_sel:
 	ti.pkt_offset = 0;
 	ti.lmac_id = ar->lmac_id;
 	ti.bss_ast_hash = arvif->ast_hash;
+	ti.bss_ast_idx = arvif->ast_idx;
 	ti.dscp_tid_tbl_idx = 0;
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
+		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
 		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
 			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
 			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
@@ -158,7 +182,7 @@ tcl_ring_sel:
 	}
 
 	if (ieee80211_vif_is_mesh(arvif->vif))
-		ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE, 1);
+		ti.enable_mesh = true;
 
 	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
 
@@ -169,10 +193,11 @@ tcl_ring_sel:
 		ath11k_dp_tx_encap_nwifi(skb);
 		break;
 	case HAL_TCL_ENCAP_TYPE_RAW:
-		/* TODO: for CHECKSUM_PARTIAL case in raw mode, HW checksum offload
-		 * is not applicable, hence manual checksum calculation using
-		 * skb_checksum_help() is needed
-		 */
+		if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
+			ret = -EINVAL;
+			goto fail_remove_idr;
+		}
+		break;
 	case HAL_TCL_ENCAP_TYPE_ETHERNET:
 		/* no need to encap */
 		break;
@@ -180,11 +205,13 @@ tcl_ring_sel:
 	default:
 		/* TODO: Take care of other encap modes as well */
 		ret = -EINVAL;
+		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 		goto fail_remove_idr;
 	}
 
 	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ab->dev, ti.paddr)) {
+	if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
+		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
 		ret = -ENOMEM;
 		goto fail_remove_idr;
@@ -203,20 +230,22 @@ tcl_ring_sel:
 	ath11k_hal_srng_access_begin(ab, tcl_ring);
 
 	hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
-	if (!hal_tcl_desc) {
+	if (unlikely(!hal_tcl_desc)) {
 		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
 		 * desc because the desc is directly enqueued onto hw queue.
 		 */
 		ath11k_hal_srng_access_end(ab, tcl_ring);
+		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
 		spin_unlock_bh(&tcl_ring->lock);
 		ret = -ENOMEM;
 
-		/* Checking for available tcl descritors in another ring in
+		/* Checking for available tcl descriptors in another ring in
 		 * case of failure due to full tcl ring now, is better than
 		 * checking this ring earlier for each pkt tx.
 		 * Restart ring selection if some rings are not checked yet.
 		 */
-		if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
+		if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
+		    ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
 			tcl_ring_retry = true;
 			ring_selector++;
 		}
@@ -229,8 +258,13 @@ tcl_ring_sel:
 
 	ath11k_hal_srng_access_end(ab, tcl_ring);
 
+	ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
+
 	spin_unlock_bh(&tcl_ring->lock);
 
+	ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
+			skb->data, skb->len);
+
 	atomic_inc(&ar->dp.num_tx_pending);
 
 	return 0;
@@ -258,20 +292,18 @@ static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
 	struct sk_buff *msdu;
 	struct ath11k_skb_cb *skb_cb;
 
-	spin_lock_bh(&tx_ring->tx_idr_lock);
-	msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
-	if (!msdu) {
+	spin_lock(&tx_ring->tx_idr_lock);
+	msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+	spin_unlock(&tx_ring->tx_idr_lock);
+
+	if (unlikely(!msdu)) {
 		ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
 			    msdu_id);
-		spin_unlock_bh(&tx_ring->tx_idr_lock);
 		return;
 	}
 
 	skb_cb = ATH11K_SKB_CB(msdu);
 
-	idr_remove(&tx_ring->txbuf_idr, msdu_id);
-	spin_unlock_bh(&tx_ring->tx_idr_lock);
-
 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 	dev_kfree_skb_any(msdu);
 
@@ -285,17 +317,20 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
 				 struct dp_tx_ring *tx_ring,
 				 struct ath11k_dp_htt_wbm_tx_status *ts)
 {
+	struct ieee80211_tx_status status = {};
 	struct sk_buff *msdu;
 	struct ieee80211_tx_info *info;
 	struct ath11k_skb_cb *skb_cb;
 	struct ath11k *ar;
+	struct ath11k_peer *peer;
 
-	spin_lock_bh(&tx_ring->tx_idr_lock);
-	msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id);
-	if (!msdu) {
+	spin_lock(&tx_ring->tx_idr_lock);
+	msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
+	spin_unlock(&tx_ring->tx_idr_lock);
+
+	if (unlikely(!msdu)) {
 		ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
 			    ts->msdu_id);
-		spin_unlock_bh(&tx_ring->tx_idr_lock);
 		return;
 	}
 
@@ -304,28 +339,51 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
 
 	ar = skb_cb->ar;
 
-	idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
-	spin_unlock_bh(&tx_ring->tx_idr_lock);
-
 	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
 		wake_up(&ar->dp.tx_empty_waitq);
 
 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
+	if (!skb_cb->vif) {
+		ieee80211_free_txskb(ar->hw, msdu);
+		return;
+	}
+
 	memset(&info->status, 0, sizeof(info->status));
 
 	if (ts->acked) {
 		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 			info->flags |= IEEE80211_TX_STAT_ACK;
-			info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
-						  ts->ack_rssi;
-			info->status.is_valid_ack_signal = true;
+			info->status.ack_signal = ts->ack_rssi;
+
+			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+				      ab->wmi_ab.svc_map))
+				info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
+
+			info->status.flags |=
+				IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
 		} else {
 			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 		}
 	}
 
-	ieee80211_tx_status(ar->hw, msdu);
+	spin_lock_bh(&ab->base_lock);
+	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+	if (!peer || !peer->sta) {
+		ath11k_dbg(ab, ATH11K_DBG_DATA,
+			   "dp_tx: failed to find the peer with peer_id %d\n",
+			   ts->peer_id);
+		spin_unlock_bh(&ab->base_lock);
+		ieee80211_free_txskb(ar->hw, msdu);
+		return;
+	}
+	spin_unlock_bh(&ab->base_lock);
+
+	status.sta = peer->sta;
+	status.info = info;
+	status.skb = msdu;
+
+	ieee80211_tx_status_ext(ar->hw, &status);
 }
 
 static void
@@ -334,14 +392,13 @@ ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
 				     u32 msdu_id, struct dp_tx_ring *tx_ring)
 {
 	struct htt_tx_wbm_completion *status_desc;
-	struct ath11k_dp_htt_wbm_tx_status ts = {0};
+	struct ath11k_dp_htt_wbm_tx_status ts = {};
 	enum hal_wbm_htt_tx_comp_status wbm_status;
 
 	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
 
 	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
 			       status_desc->info0);
-
 	switch (wbm_status) {
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
@@ -350,7 +407,15 @@ ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
 		ts.msdu_id = msdu_id;
 		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
 					status_desc->info1);
+
+		if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
+			ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
+					       status_desc->info2);
+		else
+			ts.peer_id = HTT_INVALID_PEER_ID;
+
 		ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
+
 		break;
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
@@ -384,13 +449,117 @@ static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
 	}
 }
 
+void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
+{
+	struct ath11k_base *ab = ar->ab;
+	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+	enum hal_tx_rate_stats_pkt_type pkt_type;
+	enum hal_tx_rate_stats_sgi sgi;
+	enum hal_tx_rate_stats_bw bw;
+	struct ath11k_peer *peer;
+	struct ath11k_sta *arsta;
+	struct ieee80211_sta *sta;
+	u16 rate, ru_tones;
+	u8 mcs, rate_idx = 0, ofdma;
+	int ret;
+
+	spin_lock_bh(&ab->base_lock);
+	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+	if (!peer || !peer->sta) {
+		ath11k_dbg(ab, ATH11K_DBG_DP_TX,
+			   "failed to find the peer by id %u\n", ts->peer_id);
+		goto err_out;
+	}
+
+	sta = peer->sta;
+	arsta = ath11k_sta_to_arsta(sta);
+
+	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+	pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
+			     ts->rate_stats);
+	mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
+			ts->rate_stats);
+	sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
+			ts->rate_stats);
+	bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
+	ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
+	ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);
+
+	/* This is to prefer choose the real NSS value arsta->last_txrate.nss,
+	 * if it is invalid, then choose the NSS value while assoc.
+	 */
+	if (arsta->last_txrate.nss)
+		arsta->txrate.nss = arsta->last_txrate.nss;
+	else
+		arsta->txrate.nss = arsta->peer_nss;
+
+	if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
+	    pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
+		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+							    pkt_type,
+							    &rate_idx,
+							    &rate);
+		if (ret < 0)
+			goto err_out;
+		arsta->txrate.legacy = rate;
+	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
+		if (mcs > 7) {
+			ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
+			goto err_out;
+		}
+
+		if (arsta->txrate.nss != 0)
+			arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
+		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+		if (sgi)
+			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
+		if (mcs > 9) {
+			ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
+			goto err_out;
+		}
+
+		arsta->txrate.mcs = mcs;
+		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+		if (sgi)
+			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+		if (mcs > 11) {
+			ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
+			goto err_out;
+		}
+
+		arsta->txrate.mcs = mcs;
+		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
+	}
+
+	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+	if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+		arsta->txrate.bw = RATE_INFO_BW_HE_RU;
+		arsta->txrate.he_ru_alloc =
+			ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+	}
+
+	if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
+		ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
+
+err_out:
+	spin_unlock_bh(&ab->base_lock);
+}
+
 static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
 				       struct sk_buff *msdu,
 				       struct hal_tx_status *ts)
 {
+	struct ieee80211_tx_status status = {};
+	struct ieee80211_rate_status status_rate = {};
 	struct ath11k_base *ab = ar->ab;
 	struct ieee80211_tx_info *info;
 	struct ath11k_skb_cb *skb_cb;
+	struct ath11k_peer *peer;
+	struct ath11k_sta *arsta;
+	struct rate_info rate;
 
 	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
 		/* Must not happen */
@@ -401,16 +570,14 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
 
 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
-	rcu_read_lock();
-
-	if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
-		dev_kfree_skb_any(msdu);
-		goto exit;
+	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
+		ieee80211_free_txskb(ar->hw, msdu);
+		return;
 	}
 
-	if (!skb_cb->vif) {
-		dev_kfree_skb_any(msdu);
-		goto exit;
+	if (unlikely(!skb_cb->vif)) {
+		ieee80211_free_txskb(ar->hw, msdu);
+		return;
 	}
 
 	info = IEEE80211_SKB_CB(msdu);
@@ -422,16 +589,21 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
 	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
 	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 		info->flags |= IEEE80211_TX_STAT_ACK;
-		info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
-					  ts->ack_rssi;
-		info->status.is_valid_ack_signal = true;
+		info->status.ack_signal = ts->ack_rssi;
+
+		if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+			      ab->wmi_ab.svc_map))
+			info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
+
+		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
 	}
 
 	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
 	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 
-	if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
+	if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ||
+	    ab->hw_params.single_pdev_only) {
 		if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
 			if (ar->last_ppdu_id == 0) {
 				ar->last_ppdu_id = ts->ppdu_id;
@@ -439,12 +611,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
 				   ar->cached_ppdu_id == ar->last_ppdu_id) {
 				ar->cached_ppdu_id = ar->last_ppdu_id;
 				ar->cached_stats.is_ampdu = true;
-				ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+				ath11k_dp_tx_update_txcompl(ar, ts);
 				memset(&ar->cached_stats, 0,
 				       sizeof(struct ath11k_per_peer_tx_stats));
 			} else {
 				ar->cached_stats.is_ampdu = false;
-				ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+				ath11k_dp_tx_update_txcompl(ar, ts);
 				memset(&ar->cached_stats, 0,
 				       sizeof(struct ath11k_per_peer_tx_stats));
 			}
@@ -454,15 +626,31 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
 		ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
 	}
 
-	/* NOTE: Tx rate status reporting. Tx completion status does not have
-	 * necessary information (for example nss) to build the tx rate.
-	 * Might end up reporting it out-of-band from HTT stats.
-	 */
+	spin_lock_bh(&ab->base_lock);
+	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+	if (!peer || !peer->sta) {
+		ath11k_dbg(ab, ATH11K_DBG_DATA,
+			   "dp_tx: failed to find the peer with peer_id %d\n",
+			   ts->peer_id);
+		spin_unlock_bh(&ab->base_lock);
+		ieee80211_free_txskb(ar->hw, msdu);
+		return;
+	}
+	arsta = ath11k_sta_to_arsta(peer->sta);
+	status.sta = peer->sta;
+	status.skb = msdu;
+	status.info = info;
+	rate = arsta->last_txrate;
+
+	status_rate.rate_idx = rate;
+	status_rate.try_count = 1;
+
+	status.rates = &status_rate;
+	status.n_rates = 1;
 
-	ieee80211_tx_status(ar->hw, msdu);
+	spin_unlock_bh(&ab->base_lock);
 
-exit:
-	rcu_read_unlock();
+	ieee80211_tx_status_ext(ar->hw, &status);
 }
 
 static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
@@ -471,11 +659,11 @@ static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
 {
 	ts->buf_rel_source =
 		FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
-	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
-	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+	if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
+		     ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
 		return;
 
-	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
+	if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
 		return;
 
 	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
@@ -503,12 +691,14 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
 	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
 	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
 	struct sk_buff *msdu;
-	struct hal_tx_status ts = { 0 };
+	struct hal_tx_status ts = {};
 	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
 	u32 *desc;
 	u32 msdu_id;
 	u8 mac_id;
 
+	spin_lock_bh(&status_ring->lock);
+
 	ath11k_hal_srng_access_begin(ab, status_ring);
 
 	while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
@@ -520,14 +710,17 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
 			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
 	}
 
-	if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
-	    (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
+	if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
+		     (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
+		      tx_ring->tx_status_tail))) {
 		/* TODO: Process pending tx_status messages when kfifo_is_full() */
 		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
 	}
 
 	ath11k_hal_srng_access_end(ab, status_ring);
 
+	spin_unlock_bh(&status_ring->lock);
+
 	while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
 		struct hal_wbm_release_ring *tx_status;
 		u32 desc_id;
@@ -542,7 +735,7 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
 		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
 		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
 
-		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
+		if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
 			ath11k_dp_tx_process_htt_tx_complete(ab,
 							     (void *)tx_status,
 							     mac_id, msdu_id,
@@ -550,16 +743,16 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
 			continue;
 		}
 
-		spin_lock_bh(&tx_ring->tx_idr_lock);
-		msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
-		if (!msdu) {
+		spin_lock(&tx_ring->tx_idr_lock);
+		msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+		if (unlikely(!msdu)) {
 			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
 				    msdu_id);
-			spin_unlock_bh(&tx_ring->tx_idr_lock);
+			spin_unlock(&tx_ring->tx_idr_lock);
 			continue;
 		}
-		idr_remove(&tx_ring->txbuf_idr, msdu_id);
-		spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+		spin_unlock(&tx_ring->tx_idr_lock);
 
 		ar = ab->pdevs[mac_id].ar;
 
@@ -581,6 +774,9 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
 	struct hal_srng *cmd_ring;
 	int cmd_num;
 
+	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+		return -ESHUTDOWN;
+
 	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 	cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
 
@@ -596,7 +792,7 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
 		return 0;
 
 	/* Can this be optimized so that we keep the pending command list only
-	 * for tid delete command to free up the resoruce on the command status
+	 * for tid delete command to free up the resource on the command status
	 * indication?
 	 */
 	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
@@ -628,14 +824,28 @@ ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
 	switch (ring_type) {
 	case HAL_RXDMA_BUF:
 		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
-		if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
-				  lmac_ring_id_offset) ||
-		      ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
-				  lmac_ring_id_offset))) {
-			ret = -EINVAL;
+
+		/* for QCA6390, host fills rx buffer to fw and fw fills to
+		 * rxbuf ring for each rxdma
+		 */
+		if (!ab->hw_params.rx_mac_buf_ring) {
+			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
+					  lmac_ring_id_offset) ||
+			      ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
+					  lmac_ring_id_offset))) {
+				ret = -EINVAL;
+			}
+			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+			*htt_ring_type = HTT_SW_TO_HW_RING;
+		} else {
+			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
+				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
+				*htt_ring_type = HTT_SW_TO_SW_RING;
+			} else {
+				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+				*htt_ring_type = HTT_SW_TO_HW_RING;
+			}
 		}
-		*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
-		*htt_ring_type = HTT_SW_TO_HW_RING;
 		break;
 	case HAL_RXDMA_DST:
 		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
@@ -715,7 +925,7 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
 	cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
 				 HAL_ADDR_MSB_REG_SHIFT;
 
-	ret = ath11k_hal_srng_get_entrysize(ring_type);
+	ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
 	if (ret < 0)
 		goto err_free;
 
@@ -745,9 +955,9 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
 	cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
 					      HAL_ADDR_MSB_REG_SHIFT;
 
-	cmd->ring_msi_addr_lo = 0;
-	cmd->ring_msi_addr_hi = 0;
-	cmd->msi_data = 0;
+	cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
+	cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
+	cmd->msi_data = params.msi_data;
 
 	cmd->intr_info = FIELD_PREP(
 				HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
@@ -763,6 +973,11 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
 						 params.low_threshold);
 	}
 
+	ath11k_dbg(ab, ATH11K_DBG_DP_TX,
+		   "htt srng setup msi_addr_lo 0x%x msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d intr_info 0x%x flags 0x%x\n",
+		   cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
+		   cmd->msi_data, ring_id, ring_type, cmd->intr_info, cmd->info2);
+
 	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
 	if (ret)
 		goto err_free;
 
@@ -812,7 +1027,7 @@ int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
 	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
 		ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
 			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 
 	return 0;
@@ -827,24 +1042,27 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
 	int len = sizeof(*cmd);
 	u8 pdev_mask;
 	int ret;
-
-	skb = ath11k_htc_alloc_skb(ab, len);
-	if (!skb)
-		return -ENOMEM;
-
-	skb_put(skb, len);
-	cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
-	cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
-			      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
-
-	pdev_mask = 1 << (ar->pdev_idx);
-	cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
-	cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
-
-	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
-	if (ret) {
-		dev_kfree_skb_any(skb);
-		return ret;
+	int i;
+
+	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+		skb = ath11k_htc_alloc_skb(ab, len);
+		if (!skb)
+			return -ENOMEM;
+
+		skb_put(skb, len);
+		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
+
+		pdev_mask = 1 << (ar->pdev_idx + i);
+		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
+		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
+
+		ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+		if (ret) {
+			dev_kfree_skb_any(skb);
+			return ret;
+		}
 	}
 
 	return 0;
@@ -926,6 +1144,7 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
 	struct ath11k_dp *dp = &ab->dp;
 	struct sk_buff *skb;
 	struct htt_ext_stats_cfg_cmd *cmd;
+	u32 pdev_id;
 	int len = sizeof(*cmd);
 	int ret;
 
@@ -939,7 +1158,12 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
 
-	cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+	if (ab->hw_params.single_pdev_only)
+		pdev_id = ath11k_mac_get_target_pdev_id(ar);
+	else
+		pdev_id = ar->pdev->pdev_id;
+
+	cmd->hdr.pdev_mask = 1 << pdev_id;
 
 	cmd->hdr.stats_type = type;
 	cmd->cfg_param0 = cfg_params->cfg0;
@@ -963,8 +1187,18 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
 
 int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
 {
 	struct ath11k_pdev_dp *dp = &ar->dp;
-	struct htt_rx_ring_tlv_filter tlv_filter = {0};
-	int ret = 0, ring_id = 0;
+	struct ath11k_base *ab = ar->ab;
+	struct htt_rx_ring_tlv_filter tlv_filter = {};
+	int ret = 0, ring_id = 0, i;
+
+	if (ab->hw_params.full_monitor_mode) {
+		ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
+							 dp->mac_id, !reset);
+		if (ret < 0) {
+			ath11k_err(ab, "failed to setup full monitor %d\n", ret);
+			return ret;
+		}
+	}
 
 	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
 
@@ -986,23 +1220,87 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
 					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
 	}
 
-	ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
-					       HAL_RXDMA_MONITOR_BUF,
-					       DP_RXDMA_REFILL_RING_SIZE,
-					       &tlv_filter);
+	if (ab->hw_params.rxdma1_enable) {
+		ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+						       HAL_RXDMA_MONITOR_BUF,
+						       DP_RXDMA_REFILL_RING_SIZE,
+						       &tlv_filter);
+	} else if (!reset) {
+		/* set in monitor mode only */
+		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+			ring_id = dp->rx_mac_buf_ring[i].ring_id;
+			ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+							       dp->mac_id + i,
+							       HAL_RXDMA_BUF,
+							       1024,
+							       &tlv_filter);
+		}
+	}
+
 	if (ret)
 		return ret;
 
-	ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
-	if (!reset)
-		tlv_filter.rx_filter =
-				HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
-	else
-		tlv_filter = ath11k_mac_mon_status_filter_default;
+	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+		if (!reset) {
+			tlv_filter.rx_filter =
+					HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+		} else {
+			tlv_filter = ath11k_mac_mon_status_filter_default;
+
+			if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
+				tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
+		}
+
+		ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+						       dp->mac_id + i,
+						       HAL_RXDMA_MONITOR_STATUS,
+						       DP_RXDMA_REFILL_RING_SIZE,
+						       &tlv_filter);
+	}
+
+	if (!ar->ab->hw_params.rxdma1_enable)
+		mod_timer(&ar->ab->mon_reap_timer, jiffies +
+			  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+
+	return ret;
+}
+
+int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
+				       bool config)
+{
+	struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len = sizeof(*cmd);
+
+	skb = ath11k_htc_alloc_skb(ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
+				HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
+
+	cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);
+
+	cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
+		   FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
+			      HTT_RX_MON_RING_SW);
+	if (config) {
+		cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
+			    HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
+	}
+
+	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+	if (ret)
+		goto err_free;
+
+	return 0;
+
+err_free:
+	dev_kfree_skb_any(skb);
 
-	ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
-					       HAL_RXDMA_MONITOR_STATUS,
-					       DP_RXDMA_REFILL_RING_SIZE,
-					       &tlv_filter);
 	return ret;
 }
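Both completion paths in the diff (ath11k_dp_tx_htt_tx_complete_buf() and ath11k_dp_tx_complete_msdu()) now report the ACK RSSI via info->status.ack_signal, add the default noise floor only when the firmware does not advertise the WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT service bit, and hand the frame to mac80211 through ieee80211_tx_status_ext(). The standalone sketch below shows just that noise-floor rule; the constant value and the function name are illustrative assumptions, not driver symbols (the driver uses ATH11K_DEFAULT_NOISE_FLOOR).

/* Standalone sketch of the ack_signal handling in the tx-completion paths:
 * firmware reports an ACK RSSI, and the host adds a fixed noise floor only
 * when the firmware cannot convert dB to dBm itself.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_NOISE_FLOOR (-95)	/* dBm, assumed value for the example */

static int ack_signal_dbm(int ack_rssi, bool fw_has_db2dbm)
{
	int ack_signal = ack_rssi;

	/* Offset by the noise floor only when the HW_DB2DBM service bit is
	 * absent, i.e. the firmware value is still relative to the floor.
	 */
	if (!fw_has_db2dbm)
		ack_signal += DEFAULT_NOISE_FLOOR;

	return ack_signal;
}

int main(void)
{
	printf("legacy fw: %d dBm\n", ack_signal_dbm(30, false));
	printf("db2dbm fw: %d dBm\n", ack_signal_dbm(-65, true));
	return 0;
}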
