Diffstat (limited to 'drivers/net/wireless/ath/ath12k/dp_tx.c')
-rw-r--r--	drivers/net/wireless/ath/ath12k/dp_tx.c	728
1 file changed, 595 insertions(+), 133 deletions(-)
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 62f9cdbb811c..b6816b6c2c04 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -1,16 +1,19 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
+#include "debugfs.h"
#include "hw.h"
+#include "peer.h"
+#include "mac.h"
static enum hal_tcl_encap_type
-ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb)
+ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath12k_base *ab = arvif->ar->ab;
@@ -81,6 +84,7 @@ static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
u8 pool_id)
{
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ tx_desc->skb_ext_desc = NULL;
list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}
@@ -117,15 +121,108 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
le32_encode_bits(ti->data_len,
HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
- tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
+ tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
le32_encode_bits(ti->encap_type,
HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
le32_encode_bits(ti->encrypt_type,
HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
}
-int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
- struct sk_buff *skb)
+#define HTT_META_DATA_ALIGNMENT 0x8
+
+static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
+{
+ struct sk_buff *tail;
+ void *metadata;
+
+ if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0))
+ return NULL;
+
+ metadata = pskb_put(skb, tail, tail_len);
+ memset(metadata, 0, tail_len);
+ return metadata;
+}
+
+/* Prepare the HTT metadata to be carried in the ext MSDU descriptor */
+static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
+{
+ struct hal_tx_msdu_metadata *desc_ext;
+ u8 htt_desc_size;
+	/* Size rounded up to a multiple of 8 bytes */
+ u8 htt_desc_size_aligned;
+
+ htt_desc_size = sizeof(struct hal_tx_msdu_metadata);
+ htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
+
+ desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
+ if (!desc_ext)
+ return -ENOMEM;
+
+ desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) |
+ le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) |
+ le32_encode_bits(1,
+ HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL);
+
+ return 0;
+}
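
The tail padding above rounds the metadata size up to a multiple of 8 bytes via ALIGN(). A minimal standalone sketch of that round-up (not driver code), with the kernel macro restated locally so it compiles on its own:

#include <stdio.h>

#define HTT_META_DATA_ALIGNMENT 0x8
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int sizes[] = { 8, 12, 13, 16 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("%2u -> %2u\n", sizes[i],
		       ALIGN(sizes[i], HTT_META_DATA_ALIGNMENT));
	return 0;	/* prints 8->8, 12->16, 13->16, 16->16 */
}
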
+
+static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
+ unsigned long delta,
+ bool head)
+{
+ unsigned long len = skb->len;
+
+ if (head) {
+ skb_push(skb, delta);
+ memmove(skb->data, skb->data + delta, len);
+ skb_trim(skb, len);
+ } else {
+ skb_put(skb, delta);
+ memmove(skb->data + delta, skb->data, len);
+ skb_pull(skb, delta);
+ }
+}
+
+static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
+ struct sk_buff **pskb)
+{
+ u32 iova_mask = ab->hw_params->iova_mask;
+ unsigned long offset, delta1, delta2;
+ struct sk_buff *skb2, *skb = *pskb;
+ unsigned int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ int ret = 0;
+
+ offset = (unsigned long)skb->data & iova_mask;
+ delta1 = offset;
+ delta2 = iova_mask - offset + 1;
+
+ if (headroom >= delta1) {
+ ath12k_dp_tx_move_payload(skb, delta1, true);
+ } else if (tailroom >= delta2) {
+ ath12k_dp_tx_move_payload(skb, delta2, false);
+ } else {
+ skb2 = skb_realloc_headroom(skb, iova_mask);
+ if (!skb2) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ offset = (unsigned long)skb2->data & iova_mask;
+ if (offset)
+ ath12k_dp_tx_move_payload(skb2, offset, true);
+ *pskb = skb2;
+ }
+
+out:
+ return ret;
+}
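
The helper chooses between shifting the payload toward the headroom (delta1 bytes) or toward the tailroom (delta2 bytes), whichever side has room. A standalone sketch of the arithmetic, assuming an illustrative iova_mask of 0xfff and a hypothetical buffer address (the real mask comes from ab->hw_params->iova_mask):

#include <stdio.h>

int main(void)
{
	unsigned long iova_mask = 0xfff;	/* assumed example mask */
	unsigned long data = 0x40000a30;	/* hypothetical skb->data */
	unsigned long offset = data & iova_mask;
	unsigned long delta1 = offset;			/* shift toward head */
	unsigned long delta2 = iova_mask - offset + 1;	/* shift toward tail */

	/* either move yields an aligned start address */
	printf("head: 0x%lx\n", data - delta1);	/* 0x40000000 */
	printf("tail: 0x%lx\n", data + delta2);	/* 0x40001000 */
	return 0;
}
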
+
+int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
+ bool is_mcast)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
@@ -135,9 +232,10 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct hal_tcl_data_cmd *hal_tcl_desc;
struct hal_tx_msdu_ext_desc *msg;
- struct sk_buff *skb_ext_desc;
+ struct sk_buff *skb_ext_desc = NULL;
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath12k_vif *ahvif = arvif->ahvif;
struct dp_tx_ring *tx_ring;
u8 pool_id;
u8 hal_ring_id;
@@ -145,13 +243,15 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
u8 ring_selector, ring_map = 0;
bool tcl_ring_retry;
bool msdu_ext_desc = false;
+ bool add_htt_metadata = false;
+ u32 iova_mask = ab->hw_params->iova_mask;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
@@ -180,7 +280,7 @@ tcl_ring_sel:
ti.bank_id = arvif->bank_id;
ti.meta_data_flags = arvif->tcl_metadata;
- if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+ if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
ti.encrypt_type =
@@ -195,13 +295,27 @@ tcl_ring_sel:
msdu_ext_desc = true;
}
+ if (gsn_valid) {
+		/* Reset and initialize meta_data_flags with the Global
+		 * Sequence Number (GSN) info.
+		 */
+ ti.meta_data_flags =
+ u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
+ HTT_TCL_META_DATA_TYPE) |
+ u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
+ }
+
ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
ti.type = HAL_TCL_DESC_TYPE_BUFFER;
ti.pkt_offset = 0;
ti.lmac_id = ar->lmac_id;
+
ti.vdev_id = arvif->vdev_id;
+ if (gsn_valid)
+ ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID;
+
ti.bss_ast_hash = arvif->ast_hash;
ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
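
For multicast reinjection, the GSN is packed into the TCL metadata word with two u32_encode_bits() calls. A standalone equivalent is sketched below; the mask layout and the enum value are assumptions for illustration only, not the real HTT_TCL_META_DATA_* definitions from the dp_tx headers:

#include <stdio.h>
#include <stdint.h>

#define META_TYPE_MASK		0x3u		/* bits [1:0], assumed */
#define META_GSN_MASK		0x3fffcu	/* bits [17:2], assumed */
#define TYPE_GLOBAL_SEQ_NUM	3u		/* assumed enum value */

/* same idea as the kernel's u32_encode_bits()/FIELD_PREP();
 * __builtin_ctz() finds the field's bit offset from its mask
 */
static uint32_t encode_bits(uint32_t val, uint32_t mask)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t gsn = 0x1234;
	uint32_t meta = encode_bits(TYPE_GLOBAL_SEQ_NUM, META_TYPE_MASK) |
			encode_bits(gsn, META_GSN_MASK);

	printf("meta_data_flags = 0x%08x\n", meta);
	return 0;
}
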
@@ -236,24 +350,54 @@ tcl_ring_sel:
default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
- atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ atomic_inc(&ab->device_stats.tx_err.misc_fail);
goto fail_remove_tx_buf;
}
+ if (iova_mask &&
+ (unsigned long)skb->data & iova_mask) {
+ ret = ath12k_dp_tx_align_payload(ab, &skb);
+ if (ret) {
+ ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
+			/* don't bail out; give the original buffer a
+			 * chance even though it is unaligned.
+			 */
+ goto map;
+ }
+
+		/* hdr points to the wrong place after realignment,
+		 * so refresh it for later use.
+		 */
+ hdr = (void *)skb->data;
+ }
+map:
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
- atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ atomic_inc(&ab->device_stats.tx_err.misc_fail);
ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
goto fail_remove_tx_buf;
}
+ if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+ !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ /* Add metadata for sw encrypted vlan group traffic */
+ add_htt_metadata = true;
+ msdu_ext_desc = true;
+ ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
+ ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
+ ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+
tx_desc->skb = skb;
tx_desc->mac_id = ar->pdev_idx;
ti.desc_id = tx_desc->desc_id;
ti.data_len = skb->len;
skb_cb->paddr = ti.paddr;
- skb_cb->vif = arvif->vif;
+ skb_cb->vif = ahvif->vif;
skb_cb->ar = ar;
if (msdu_ext_desc) {
@@ -269,18 +413,26 @@ tcl_ring_sel:
msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
+ if (add_htt_metadata) {
+ ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc);
+ if (ret < 0) {
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ "Failed to add HTT meta data, dropping packet\n");
+ goto fail_free_ext_skb;
+ }
+ }
+
ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
skb_ext_desc->len, DMA_TO_DEVICE);
ret = dma_mapping_error(ab->dev, ti.paddr);
- if (ret) {
- kfree_skb(skb_ext_desc);
- goto fail_unmap_dma;
- }
+ if (ret)
+ goto fail_free_ext_skb;
ti.data_len = skb_ext_desc->len;
ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
skb_cb->paddr_ext_desc = ti.paddr;
+ tx_desc->skb_ext_desc = skb_ext_desc;
}
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
@@ -296,7 +448,7 @@ tcl_ring_sel:
* desc because the desc is directly enqueued onto hw queue.
*/
ath12k_hal_srng_access_end(ab, tcl_ring);
- ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
+ ab->device_stats.tx_err.desc_na[ti.ring_id]++;
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
@@ -311,9 +463,22 @@ tcl_ring_sel:
ring_selector++;
}
- goto fail_unmap_dma;
+ goto fail_unmap_dma_ext;
}
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_encap_type[ti.encap_type]++;
+ arvif->link_stats.tx_encrypt_type[ti.encrypt_type]++;
+ arvif->link_stats.tx_desc_type[ti.type]++;
+
+ if (is_mcast)
+ arvif->link_stats.tx_bcast_mcast++;
+ else
+ arvif->link_stats.tx_enqueued++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
+ ab->device_stats.tx_enqueued[ti.ring_id]++;
+
ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
ath12k_hal_srng_access_end(ab, tcl_ring);
@@ -327,16 +492,24 @@ tcl_ring_sel:
return 0;
-fail_unmap_dma:
- dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
-
+fail_unmap_dma_ext:
if (skb_cb->paddr_ext_desc)
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc),
+ skb_ext_desc->len,
DMA_TO_DEVICE);
+fail_free_ext_skb:
+ kfree_skb(skb_ext_desc);
+
+fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
fail_remove_tx_buf:
ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_dropped++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
if (tcl_ring_retry)
goto tcl_ring_sel;
@@ -344,93 +517,121 @@ fail_remove_tx_buf:
}
static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
- struct sk_buff *msdu, u8 mac_id,
- struct dp_tx_ring *tx_ring)
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_tx_desc_params *desc_params)
{
struct ath12k *ar;
+ struct sk_buff *msdu = desc_params->skb;
struct ath12k_skb_cb *skb_cb;
- u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
+ u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params->mac_id);
skb_cb = ATH12K_SKB_CB(msdu);
+ ar = ab->pdevs[pdev_id].ar;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc)
+ if (skb_cb->paddr_ext_desc) {
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ar->ah->hw, msdu);
- ar = ab->pdevs[pdev_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
}
static void
ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
- struct sk_buff *msdu,
+ struct ath12k_tx_desc_params *desc_params,
struct dp_tx_ring *tx_ring,
struct ath12k_dp_htt_wbm_tx_status *ts)
{
struct ieee80211_tx_info *info;
+ struct ath12k_link_vif *arvif;
struct ath12k_skb_cb *skb_cb;
+ struct ieee80211_vif *vif;
+ struct ath12k_vif *ahvif;
struct ath12k *ar;
+ struct sk_buff *msdu = desc_params->skb;
skb_cb = ATH12K_SKB_CB(msdu);
info = IEEE80211_SKB_CB(msdu);
ar = skb_cb->ar;
+ ab->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc)
+ if (skb_cb->paddr_ext_desc) {
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
+
+ vif = skb_cb->vif;
+ if (vif) {
+ ahvif = ath12k_vif_to_ahvif(vif);
+ rcu_read_lock();
+ arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
+ if (arvif) {
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_completed++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+ }
+ rcu_read_unlock();
+ }
memset(&info->status, 0, sizeof(info->status));
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+
info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
}
- ieee80211_tx_status_skb(ar->hw, msdu);
+ ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
}
static void
-ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
- void *desc, u8 mac_id,
- struct sk_buff *msdu,
- struct dp_tx_ring *tx_ring)
+ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_tx_desc_params *desc_params)
{
struct htt_tx_wbm_completion *status_desc;
struct ath12k_dp_htt_wbm_tx_status ts = {0};
enum hal_wbm_htt_tx_comp_status wbm_status;
- status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+ status_desc = desc;
wbm_status = le32_get_bits(status_desc->info0,
HTT_TX_WBM_COMP_INFO0_STATUS);
+ ab->device_stats.fw_tx_status[wbm_status]++;
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.ack_rssi = le32_get_bits(status_desc->info2,
HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
- ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
+ ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts);
break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
- ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
+ ath12k_dp_tx_free_txbuf(ab, tx_ring, desc_params);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
/* This event is to be handled only when the driver decides to
@@ -438,18 +639,142 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
*/
break;
default:
- ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+ ath12k_warn(ab, "Unknown htt wbm tx status %d\n", wbm_status);
+ break;
+ }
+}
+
+static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+ struct rate_info txrate = {0};
+ u16 rate, ru_tones;
+ u8 rate_idx = 0;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ "failed to find the peer by id %u\n", ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+ sta = peer->sta;
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
+
+	/* Prefer the real NSS value from arsta->last_txrate.nss;
+	 * if it is invalid, fall back to the NSS value negotiated
+	 * at association.
+	 */
+ if (arsta->last_txrate.nss)
+ txrate.nss = arsta->last_txrate.nss;
+ else
+ txrate.nss = arsta->peer_nss;
+ spin_unlock_bh(&ab->base_lock);
+
+ switch (ts->pkt_type) {
+ case HAL_TX_RATE_STATS_PKT_TYPE_11A:
+ case HAL_TX_RATE_STATS_PKT_TYPE_11B:
+ ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
+ ts->pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0) {
+ ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
+ return;
+ }
+
+ txrate.legacy = rate;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11N:
+ if (ts->mcs > ATH12K_HT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ if (txrate.nss != 0)
+ txrate.mcs = ts->mcs + 8 * (txrate.nss - 1);
+
+ txrate.flags = RATE_INFO_FLAGS_MCS;
+
+ if (ts->sgi)
+ txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
+ if (ts->mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+
+ if (ts->sgi)
+ txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
+ if (ts->mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi);
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
+ if (ts->mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+ txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
+ break;
+ default:
+ ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
+ return;
}
+
+ txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw);
+
+ if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ txrate.bw = RATE_INFO_BW_HE_RU;
+ ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones);
+ txrate.he_ru_alloc =
+ ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ }
+
+ if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
+ txrate.bw = RATE_INFO_BW_EHT_RU;
+ txrate.eht_ru_alloc =
+ ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones);
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ arsta->txrate = txrate;
+ spin_unlock_bh(&ab->base_lock);
}
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
- struct sk_buff *msdu,
- struct hal_tx_status *ts)
+ struct ath12k_tx_desc_params *desc_params,
+ struct hal_tx_status *ts,
+ int ring)
{
struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ar->ah;
struct ieee80211_tx_info *info;
+ struct ath12k_link_vif *arvif;
struct ath12k_skb_cb *skb_cb;
+ struct ieee80211_vif *vif;
+ struct ath12k_vif *ahvif;
+ struct sk_buff *msdu = desc_params->skb;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
@@ -457,48 +782,86 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
}
skb_cb = ATH12K_SKB_CB(msdu);
+ ab->device_stats.tx_completed[ring]++;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc)
+ if (skb_cb->paddr_ext_desc) {
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ah->hw, msdu);
goto exit;
}
if (!skb_cb->vif) {
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ah->hw, msdu);
goto exit;
}
+ vif = skb_cb->vif;
+ if (vif) {
+ ahvif = ath12k_vif_to_ahvif(vif);
+ arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
+ if (arvif) {
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_completed++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+ }
+ }
+
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
/* skip tx rate update from ieee80211_status */
info->status.rates[0].idx = -1;
- if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
- !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
- info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
- }
+ switch (ts->status) {
+ case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
- if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
- (info->flags & IEEE80211_TX_CTL_NO_ACK))
- info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+ break;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ break;
+ }
+ fallthrough;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
+ case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
+		/* The failure is due to an internal firmware tx failure;
+		 * drop the frame and do not report its status to the
+		 * upper layer
+		 */
+ ieee80211_free_txskb(ah->hw, msdu);
+ goto exit;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
+ ts->status);
+ break;
+ }
/* NOTE: Tx rate status reporting. Tx completion status does not have
* necessary information (for example nss) to build the tx rate.
* Might end up reporting it out-of-band from HTT stats.
*/
- ieee80211_tx_status_skb(ar->hw, msdu);
+ ath12k_dp_tx_update_txcompl(ar, ts);
+
+ ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
exit:
rcu_read_unlock();
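
Both completion paths apply the same ack_signal rule: the firmware reports RSSI in dB relative to the noise floor unless it advertises WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, in which case the value is already in dBm. A sketch of that conversion, assuming the usual -95 dBm default noise floor:

#include <stdio.h>
#include <stdbool.h>

#define ATH12K_DEFAULT_NOISE_FLOOR (-95)	/* dBm, assumed value */

static int ack_signal_dbm(int ack_rssi, bool hw_db2dbm)
{
	/* firmware value is dB above the noise floor unless the
	 * hardware already converted it to dBm
	 */
	return hw_db2dbm ? ack_rssi : ack_rssi + ATH12K_DEFAULT_NOISE_FLOOR;
}

int main(void)
{
	printf("%d dBm\n", ack_signal_dbm(35, false));	/* -60 dBm */
	printf("%d dBm\n", ack_signal_dbm(-60, true));	/* -60 dBm */
	return 0;
}
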
@@ -508,6 +871,8 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
struct hal_wbm_completion_ring_tx *desc,
struct hal_tx_status *ts)
{
+ u32 info0 = le32_to_cpu(desc->rate_stats.info0);
+
ts->buf_rel_source =
le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
@@ -522,10 +887,17 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
ts->ppdu_id = le32_get_bits(desc->info1,
HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
- if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
- ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
- else
- ts->rate_stats = 0;
+
+ ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
+
+ if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
+ ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
+ ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
+ ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI);
+ ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);
+ ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU);
+ ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX);
+ }
}
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
@@ -535,12 +907,14 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct ath12k_tx_desc_info *tx_desc = NULL;
- struct sk_buff *msdu;
struct hal_tx_status ts = { 0 };
+ struct ath12k_tx_desc_params desc_params;
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
struct hal_wbm_release_ring *desc;
- u8 mac_id, pdev_id;
+ u8 pdev_id;
u64 desc_va;
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason rel_status;
spin_lock_bh(&status_ring->lock);
@@ -593,28 +967,37 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
continue;
}
- msdu = tx_desc->skb;
- mac_id = tx_desc->mac_id;
+ desc_params.mac_id = tx_desc->mac_id;
+ desc_params.skb = tx_desc->skb;
+ desc_params.skb_ext_desc = tx_desc->skb_ext_desc;
+
+ /* Find the HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE value */
+ buf_rel_source = le32_get_bits(tx_status->info0,
+ HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
+ ab->device_stats.tx_wbm_rel_source[buf_rel_source]++;
+
+ rel_status = le32_get_bits(tx_status->info0,
+ HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
+ ab->device_stats.tqm_rel_reason[rel_status]++;
/* Release the descriptor as soon as the necessary info has
 * been extracted, to reduce contention
 */
ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
- ath12k_dp_tx_process_htt_tx_complete(ab,
- (void *)tx_status,
- mac_id, msdu,
- tx_ring);
+ ath12k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status,
+ tx_ring, &desc_params);
continue;
}
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params.mac_id);
ar = ab->pdevs[pdev_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
- ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
+ ath12k_dp_tx_complete_msdu(ar, &desc_params, &ts,
+ tx_ring->tcl_data_ring_id);
}
}
@@ -654,7 +1037,7 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_BUF:
- *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+ *htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_STATUS:
@@ -662,21 +1045,13 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_DST:
- *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+ *htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_DESC:
*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
- case HAL_TX_MONITOR_BUF:
- *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
- break;
- case HAL_TX_MONITOR_DST:
- *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
- *htt_ring_type = HTT_HW_TO_SW_RING;
- break;
default:
ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
ret = -EINVAL;
@@ -819,7 +1194,14 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
skb_put(skb, len);
cmd = (struct htt_ver_req_cmd *)skb->data;
cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
- HTT_VER_REQ_INFO_MSG_ID);
+ HTT_OPTION_TAG);
+
+ cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
+ HTT_OPTION_TAG) |
+ le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
+ HTT_OPTION_LEN) |
+ le32_encode_bits(HTT_OPTION_TCL_METADATA_VER_V2,
+ HTT_OPTION_VALUE);
ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
@@ -837,7 +1219,7 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -854,7 +1236,7 @@ int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
int ret;
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
@@ -925,15 +1307,46 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
- HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
+ cmd->info0 |=
+ le32_encode_bits(tlv_filter->drop_threshold_valid,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
+ cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);
+
cmd->info1 = le32_encode_bits(rx_buf_size,
HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
+ cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_mgmt_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_ctrl_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_data_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);
+
+ cmd->info3 =
+ le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
+ cmd->info3 |=
+ le32_encode_bits(tlv_filter->rx_tlv_offset,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);
+
if (tlv_filter->offset_valid) {
cmd->rx_packet_offset =
le32_encode_bits(tlv_filter->rx_packet_offset,
@@ -964,6 +1377,26 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
}
+ if (tlv_filter->rx_mpdu_start_wmask > 0 &&
+ tlv_filter->rx_msdu_end_wmask > 0) {
+ cmd->info2 |=
+ le32_encode_bits(true,
+ HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
+ cmd->rx_mpdu_start_end_mask =
+ le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
+		/* mpdu_end is not used by any hardware so far;
+		 * assign it via hal ops in the future if any
+		 * chip starts using it
+		 */
+ cmd->rx_mpdu_start_end_mask |=
+ le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
+ cmd->rx_msdu_end_word_mask =
+ le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
+ }
+
ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
@@ -987,6 +1420,7 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
struct htt_ext_stats_cfg_cmd *cmd;
int len = sizeof(*cmd);
int ret;
+ u32 pdev_id;
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
@@ -998,7 +1432,8 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
- cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+ pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ cmd->hdr.pdev_mask = 1 << pdev_id;
cmd->hdr.stats_type = type;
cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
@@ -1024,13 +1459,7 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
struct ath12k_base *ab = ar->ab;
int ret;
- ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
- if (ret) {
- ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
- return ret;
- }
-
- ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
+ ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
if (ret) {
ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
return ret;
@@ -1042,15 +1471,28 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
- int ret, ring_id;
+ int ret, ring_id, i;
- ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
tlv_filter.offset_valid = false;
if (!reset) {
- tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;
+
+ tlv_filter.drop_threshold_valid = true;
+ tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;
+
+ tlv_filter.enable_log_mgmt_type = true;
+ tlv_filter.enable_log_ctrl_type = true;
+ tlv_filter.enable_log_data_type = true;
+
+ tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+ tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+ tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+
+ tlv_filter.enable_rx_tlv_offset = true;
+ tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;
+
tlv_filter.pkt_filter_flags0 =
HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
@@ -1065,16 +1507,64 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath12k_mac_mon_status_filter_default;
+
+ if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
+ tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
}
if (ab->hw_params->rxdma1_enable) {
- ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
- HAL_RXDMA_MONITOR_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_DST,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for monitor buf %d\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+ }
+
+ if (!reset) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ i,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for mon rx buf %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ if (!reset) {
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ }
+
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ i,
+ HAL_RXDMA_MONITOR_STATUS,
+ RX_MON_STATUS_BUF_SIZE,
&tlv_filter);
if (ret) {
ath12k_err(ab,
- "failed to setup filter for monitor buf %d\n", ret);
+ "failed to setup filter for mon status buf %d\n",
+ ret);
return ret;
}
}
@@ -1189,31 +1679,3 @@ err_free:
dev_kfree_skb_any(skb);
return ret;
}
-
-int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct htt_tx_ring_tlv_filter tlv_filter = {0};
- int ret, ring_id;
-
- ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
-
- /* TODO: Need to set upstream/downstream tlv filters
- * here
- */
-
- if (ab->hw_params->rxdma1_enable) {
- ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0,
- HAL_TX_MONITOR_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
- if (ret) {
- ath12k_err(ab,
- "failed to setup filter for monitor buf %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}