Diffstat (limited to 'drivers/net/wireless/ath/ath12k/dp_rx.c')
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_rx.c | 1302
1 file changed, 842 insertions(+), 460 deletions(-)
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index ca76c018dd0c..bd95dc88f9b2 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -17,6 +17,7 @@
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
+#include "debugfs_htt_stats.h"
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
@@ -193,6 +194,22 @@ static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}
+u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
+}
+
+bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ u32 tlv_tag;
+
+ tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc);
+
+ return tlv_tag == HAL_RX_MPDU_START;
+}
+
static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
@@ -227,43 +244,67 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}
-static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
+static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}
-static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
+static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
{
- return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(skb_list)))
+ dev_kfree_skb_any(skb);
}
-static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
+static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
+ struct list_head *head,
+ size_t count)
{
- int i, reaped = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
+ struct list_head *cur;
+ struct ath12k_rx_desc_info *rx_desc;
+ size_t nodes = 0;
- do {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
- reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
- DP_MON_SERVICE_BUDGET,
- ATH12K_DP_RX_MONITOR_MODE);
+ if (!count) {
+ INIT_LIST_HEAD(list);
+ goto out;
+ }
- /* nothing more to reap */
- if (reaped < DP_MON_SERVICE_BUDGET)
- return 0;
+ list_for_each(cur, head) {
+ if (!count)
+ break;
- } while (time_before(jiffies, timeout));
+ rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
+ rx_desc->in_use = true;
- ath12k_warn(ab, "dp mon ring purge timeout");
+ count--;
+ nodes++;
+ }
- return -ETIMEDOUT;
+ list_cut_before(list, head, cur);
+out:
+ return nodes;
+}
+
+static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
+ struct list_head *used_list)
+{
+ struct ath12k_rx_desc_info *rx_desc, *safe;
+
+ /* Reset the use flag */
+ list_for_each_entry_safe(rx_desc, safe, used_list, list)
+ rx_desc->in_use = false;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_splice_tail(used_list, &dp->rx_desc_free_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
}
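The two helpers above replace the old per-buffer locking on dp->rx_desc_free_list with a bulk pattern: cut up to N descriptors out of the free list under a single lock acquisition, consume them locklessly, then splice any leftovers back. A minimal sketch of that discipline, assuming the driver's dp->rx_desc_lock; 'n' and the consume step are placeholders:

	LIST_HEAD(used_list);
	size_t nodes;

	/* detach up to n descriptors from the free list in one locked step */
	spin_lock_bh(&dp->rx_desc_lock);
	nodes = ath12k_dp_list_cut_nodes(&used_list, &dp->rx_desc_free_list, n);
	spin_unlock_bh(&dp->rx_desc_lock);

	/* ... consume up to 'nodes' entries from used_list without the lock ... */

	/* hand back whatever was not consumed */
	ath12k_dp_rx_enqueue_free(dp, &used_list);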
/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring,
+ struct list_head *used_list,
int req_entries)
{
struct ath12k_buffer_addr *desc;
@@ -292,6 +333,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
req_entries = min(num_free, req_entries);
num_remain = req_entries;
+ if (!num_remain)
+ goto out;
+
+ /* Get the descriptor from free list */
+ if (list_empty(used_list)) {
+ spin_lock_bh(&dp->rx_desc_lock);
+ req_entries = ath12k_dp_list_cut_nodes(used_list,
+ &dp->rx_desc_free_list,
+ num_remain);
+ spin_unlock_bh(&dp->rx_desc_lock);
+ num_remain = req_entries;
+ }
+
while (num_remain > 0) {
skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
DP_RX_BUFFER_ALIGN_SIZE);
@@ -311,33 +365,20 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
if (dma_mapping_error(ab->dev, paddr))
goto fail_free_skb;
- spin_lock_bh(&dp->rx_desc_lock);
-
- /* Get desc from free list and store in used list
- * for cleanup purposes
- *
- * TODO: pass the removed descs rather than
- * add/read to optimize
- */
- rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
+ rx_desc = list_first_entry_or_null(used_list,
struct ath12k_rx_desc_info,
list);
- if (!rx_desc) {
- spin_unlock_bh(&dp->rx_desc_lock);
+ if (!rx_desc)
goto fail_dma_unmap;
- }
rx_desc->skb = skb;
cookie = rx_desc->cookie;
- list_del(&rx_desc->list);
- list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
-
- spin_unlock_bh(&dp->rx_desc_lock);
desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!desc)
- goto fail_buf_unassign;
+ goto fail_dma_unmap;
+ list_del(&rx_desc->list);
ATH12K_SKB_RXCB(skb)->paddr = paddr;
num_remain--;
@@ -345,26 +386,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
}
- ath12k_hal_srng_access_end(ab, srng);
-
- spin_unlock_bh(&srng->lock);
+ goto out;
- return req_entries - num_remain;
-
-fail_buf_unassign:
- spin_lock_bh(&dp->rx_desc_lock);
- list_del(&rx_desc->list);
- list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
- rx_desc->skb = NULL;
- spin_unlock_bh(&dp->rx_desc_lock);
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
-
+out:
ath12k_hal_srng_access_end(ab, srng);
+ if (!list_empty(used_list))
+ ath12k_dp_rx_enqueue_free(dp, used_list);
+
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
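Callers of the reworked replenish API either pass an empty list (the function then cuts fresh descriptors from the free list itself, as in the ring-setup path below) or hand back the descriptors they just reaped. A hedged usage sketch mirroring the REO process loop later in this patch:

	LIST_HEAD(used_list);	/* filled while reaping the destination ring */
	int num_reaped = 0;

	/* per reaped entry:
	 *	list_add_tail(&desc_info->list, &used_list);
	 *	num_reaped++;
	 */

	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &used_list, num_reaped);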
@@ -396,10 +430,16 @@ static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ int i;
ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
- ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->tx_mon_buf_ring);
+ if (ab->hw_params->rxdma1_enable)
+ return 0;
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
+ ath12k_dp_rxdma_mon_buf_ring_free(ab,
+ &dp->rx_mon_status_refill_ring[i]);
return 0;
}
@@ -414,7 +454,12 @@ static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
ath12k_hal_srng_get_entrysize(ab, ringtype);
rx_ring->bufs_max = num_entries;
- ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
+
+ if (ringtype == HAL_RXDMA_MONITOR_STATUS)
+ ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
+ num_entries);
+ else
+ ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
return 0;
}
@@ -422,13 +467,12 @@ static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring)
{
- int num_entries;
+ LIST_HEAD(list);
- num_entries = rx_ring->refill_buf_ring.size /
- ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
+ rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
+ ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
- rx_ring->bufs_max = num_entries;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_entries);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
return 0;
}
@@ -436,7 +480,8 @@ static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
- int ret;
+ struct dp_rxdma_mon_ring *mon_ring;
+ int ret, i;
ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
if (ret) {
@@ -449,18 +494,19 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
&dp->rxdma_mon_buf_ring,
HAL_RXDMA_MONITOR_BUF);
- if (ret) {
+ if (ret)
ath12k_warn(ab,
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
- return ret;
- }
+ return ret;
+ }
- ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
- &dp->tx_mon_buf_ring,
- HAL_TX_MONITOR_BUF);
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ mon_ring = &dp->rx_mon_status_refill_ring[i];
+ ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
+ HAL_RXDMA_MONITOR_STATUS);
if (ret) {
ath12k_warn(ab,
- "failed to setup HAL_TX_MONITOR_BUF\n");
+ "failed to setup HAL_RXDMA_MONITOR_STATUS\n");
return ret;
}
}
@@ -474,10 +520,8 @@ static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
struct ath12k_base *ab = ar->ab;
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
- ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
- }
}
void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
@@ -521,7 +565,7 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
int ret;
u32 mac_id = dp->mac_id;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ar->ab,
&dp->rxdma_mon_dst_ring[i],
HAL_RXDMA_MONITOR_DST,
@@ -532,17 +576,6 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
"failed to setup HAL_RXDMA_MONITOR_DST\n");
return ret;
}
-
- ret = ath12k_dp_srng_setup(ar->ab,
- &dp->tx_mon_dst_ring[i],
- HAL_TX_MONITOR_DST,
- 0, mac_id + i,
- DP_TX_MONITOR_DEST_RING_SIZE);
- if (ret) {
- ath12k_warn(ar->ab,
- "failed to setup HAL_TX_MONITOR_DST\n");
- return ret;
- }
}
return 0;
@@ -557,9 +590,9 @@ void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
- dma_unmap_single(ab->dev, cmd->data.paddr,
- cmd->data.size, DMA_BIDIRECTIONAL);
- kfree(cmd->data.vaddr);
+ dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
+ cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
+ kfree(cmd->data.qbuf.vaddr);
kfree(cmd);
}
@@ -567,9 +600,9 @@ void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
- dma_unmap_single(ab->dev, cmd_cache->data.paddr,
- cmd_cache->data.size, DMA_BIDIRECTIONAL);
- kfree(cmd_cache->data.vaddr);
+ dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
+ cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
+ kfree(cmd_cache->data.qbuf.vaddr);
kfree(cmd_cache);
}
spin_unlock_bh(&dp->reo_cmd_lock);
@@ -584,10 +617,10 @@ static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
- dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+ dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
@@ -642,13 +675,13 @@ static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
unsigned long tot_desc_sz, desc_sz;
int ret;
- tot_desc_sz = rx_tid->size;
+ tot_desc_sz = rx_tid->qbuf.size;
desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
while (tot_desc_sz > desc_sz) {
tot_desc_sz -= desc_sz;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE, &cmd,
NULL);
@@ -659,8 +692,8 @@ static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
}
memset(&cmd, 0, sizeof(cmd));
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE,
@@ -668,10 +701,10 @@ static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
if (ret) {
ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
}
@@ -730,10 +763,10 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
return;
free_desc:
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
@@ -741,36 +774,51 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(lower_32_bits(paddr),
BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(upper_32_bits(paddr),
BUFFER_ADDR_INFO1_ADDR) |
u32_encode_bits(tid, DP_REO_QREF_NUM);
+ ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}
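This setup function and the qref_reset below now duplicate the LUT selection. A hypothetical helper (not in the patch) that factors it out, assuming the same ATH12K_PEER_ML_ID_VALID encoding of ML peer ids:

static struct ath12k_reo_queue_ref *
example_get_qref(struct ath12k_dp *dp, u16 peer_id, u16 tid)
{
	/* the ML flag rides in the peer id and selects the ML LUT */
	void *lut = (peer_id & ATH12K_PEER_ML_ID_VALID) ?
		    dp->ml_reoq_lut.vaddr : dp->reoq_lut.vaddr;

	peer_id &= ~ATH12K_PEER_ML_ID_VALID;

	/* each peer owns IEEE80211_NUM_TIDS + 1 consecutive entries */
	return (struct ath12k_reo_queue_ref *)lut +
	       (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
}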
static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
@@ -788,8 +836,8 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
return;
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
@@ -797,26 +845,24 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
if (ret) {
ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
- dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned,
+ rx_tid->qbuf.size, DMA_BIDIRECTIONAL);
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
- ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
+ else
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
rx_tid->active = false;
}
-/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
- * to struct hal_wbm_release_ring, I couldn't figure out the logic behind
- * that.
- */
-static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
- struct hal_reo_dest_ring *ring,
- enum hal_wbm_rel_bm_act action)
+int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action)
{
- struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
struct hal_wbm_release_ring *desc;
struct ath12k_dp *dp = &ab->dp;
struct hal_srng *srng;
@@ -834,7 +880,7 @@ static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
goto exit;
}
- ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);
+ ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);
exit:
ath12k_hal_srng_access_end(ab, srng);
@@ -847,14 +893,17 @@ exit:
static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
bool rel_link_desc)
{
+ struct ath12k_buffer_addr *buf_addr_info;
struct ath12k_base *ab = rx_tid->ab;
lockdep_assert_held(&ab->base_lock);
if (rx_tid->dst_ring_desc) {
- if (rel_link_desc)
- ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
+ if (rel_link_desc) {
+ buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
+ ath12k_dp_rx_link_desc_return(ab, buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
kfree(rx_tid->dst_ring_desc);
rx_tid->dst_ring_desc = NULL;
}
@@ -879,7 +928,7 @@ void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
ath12k_dp_rx_frags_cleanup(rx_tid, true);
spin_unlock_bh(&ar->ab->base_lock);
- del_timer_sync(&rx_tid->frag_timer);
+ timer_delete_sync(&rx_tid->frag_timer);
spin_lock_bh(&ar->ab->base_lock);
}
}
@@ -893,8 +942,8 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
struct ath12k_hal_reo_cmd cmd = {0};
int ret;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
cmd.ba_window_size = ba_win_sz;
@@ -918,18 +967,67 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
return 0;
}
+static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
+ struct ath12k_sta *ahsta,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u16 ssn, enum hal_pn_type pn_type)
+{
+ u32 ba_win_sz = rx_tid->ba_win_sz;
+ struct ath12k_reoq_buf *buf;
+ void *vaddr, *vaddr_aligned;
+ dma_addr_t paddr_aligned;
+ u8 tid = rx_tid->tid;
+ u32 hw_desc_sz;
+ int ret;
+
+ buf = &ahsta->reoq_bufs[tid];
+ if (!buf->vaddr) {
+ /* TODO: Optimize the memory allocation for qos tid based on
+ * the actual BA window size in REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
+ if (!vaddr)
+ return -ENOMEM;
+
+ vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+ ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
+ ssn, pn_type);
+
+ paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
+ DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(ab->dev, paddr_aligned);
+ if (ret) {
+ kfree(vaddr);
+ return ret;
+ }
+
+ buf->vaddr = vaddr;
+ buf->paddr_aligned = paddr_aligned;
+ buf->size = hw_desc_sz;
+ }
+
+ rx_tid->qbuf = *buf;
+ rx_tid->active = true;
+
+ return 0;
+}
+
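Usage sketch for the new helper, assuming peer->sta and the rx_tid fields (tid, ba_win_sz) are already populated: the first call per (sta, tid) allocates and DMA-maps the queue descriptor; subsequent BA renegotiations on the same tid reuse the cached ahsta->reoq_bufs[tid] instead of reallocating.

	ahsta = ath12k_sta_to_ahsta(peer->sta);
	ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
	if (ret)
		return ret;	/* nothing mapped, nothing to unwind */

	/* rx_tid->qbuf now aliases ahsta->reoq_bufs[tid] */
	paddr_aligned = rx_tid->qbuf.paddr_aligned;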
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn,
enum hal_pn_type pn_type)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
- struct hal_rx_reo_queue *addr_aligned;
struct ath12k_peer *peer;
+ struct ath12k_sta *ahsta;
struct ath12k_dp_rx_tid *rx_tid;
- u32 hw_desc_sz;
- void *vaddr;
- dma_addr_t paddr;
+ dma_addr_t paddr_aligned;
int ret;
spin_lock_bh(&ab->base_lock);
@@ -941,7 +1039,14 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return -ENOENT;
}
- if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
+ if (ab->hw_params->dp_primary_link_only &&
+ !peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ if (ab->hw_params->reoq_lut_support &&
+ (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "reo qref table is not setup\n");
return -EINVAL;
@@ -957,7 +1062,6 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
rx_tid = &peer->rx_tid[tid];
/* Update the tid queue if it is already setup */
if (rx_tid->active) {
- paddr = rx_tid->paddr;
ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
ba_win_sz, ssn, true);
spin_unlock_bh(&ab->base_lock);
@@ -967,10 +1071,11 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
}
if (!ab->hw_params->reoq_lut_support) {
+ paddr_aligned = rx_tid->qbuf.paddr_aligned;
ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
peer_mac,
- paddr, tid, 1,
- ba_win_sz);
+ paddr_aligned, tid,
+ 1, ba_win_sz);
if (ret) {
ath12k_warn(ab, "failed to setup peer rx reorder queuefor tid %d: %d\n",
tid, ret);
@@ -985,70 +1090,59 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
rx_tid->ba_win_sz = ba_win_sz;
- /* TODO: Optimize the memory allocation for qos tid based on
- * the actual BA window size in REO tid update path.
- */
- if (tid == HAL_DESC_REO_NON_QOS_TID)
- hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
- else
- hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
-
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
- if (!vaddr) {
- spin_unlock_bh(&ab->base_lock);
- return -ENOMEM;
- }
-
- addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
- ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
- ssn, pn_type);
-
- paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
- DMA_BIDIRECTIONAL);
-
- ret = dma_mapping_error(ab->dev, paddr);
+ ahsta = ath12k_sta_to_ahsta(peer->sta);
+ ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
if (ret) {
spin_unlock_bh(&ab->base_lock);
- goto err_mem_free;
+ ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
+ return ret;
}
- rx_tid->vaddr = vaddr;
- rx_tid->paddr = paddr;
- rx_tid->size = hw_desc_sz;
- rx_tid->active = true;
-
+ paddr_aligned = rx_tid->qbuf.paddr_aligned;
if (ab->hw_params->reoq_lut_support) {
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
*/
- ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid,
+ paddr_aligned);
+ else
+ ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid,
+ paddr_aligned);
+
spin_unlock_bh(&ab->base_lock);
} else {
spin_unlock_bh(&ab->base_lock);
ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
- paddr, tid, 1, ba_win_sz);
+ paddr_aligned, tid, 1,
+ ba_win_sz);
}
return ret;
-
-err_mem_free:
- kfree(vaddr);
-
- return ret;
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
struct ath12k_base *ab = ar->ab;
- struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta);
- int vdev_id = arsta->arvif->vdev_id;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
+ struct ath12k_link_sta *arsta;
+ int vdev_id;
int ret;
- ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (!arsta)
+ return -ENOLINK;
+
+ vdev_id = arsta->arvif->vdev_id;
+
+ ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
params->tid, params->buf_size,
- params->ssn, arsta->pn_type);
+ params->ssn, arsta->ahsta->pn_type);
if (ret)
ath12k_warn(ab, "failed to setup rx tid %d\n", ret);
@@ -1056,18 +1150,29 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
}
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
- struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta);
- int vdev_id = arsta->arvif->vdev_id;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
+ struct ath12k_link_sta *arsta;
+ int vdev_id;
bool active;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (!arsta)
+ return -ENOLINK;
+
+ vdev_id = arsta->arvif->vdev_id;
+
spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
+ peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
@@ -1092,7 +1197,7 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
return ret;
}
-int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
+int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key)
@@ -1148,8 +1253,8 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->active)
continue;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE,
&cmd, NULL);
@@ -1270,10 +1375,10 @@ static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
return 0;
}
-static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
- int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
- const void *ptr, void *data),
- void *data)
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
{
const struct htt_tlv *tlv;
const void *begin = ptr;
@@ -1314,7 +1419,8 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ieee80211_sta *sta;
- struct ath12k_sta *arsta;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
struct htt_ppdu_stats_user_rate *user_rate;
struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
@@ -1395,7 +1501,8 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
}
sta = peer->sta;
- arsta = ath12k_sta_to_arsta(sta);
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
@@ -1647,7 +1754,11 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
rcu_read_lock();
ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
- ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+ /* It is possible that the ar is not yet active (started).
+ * The function above only looks for an active pdev, so a
+ * %NULL return is possible here. Silently discard this
+ * message.
+ */
goto exit;
}
@@ -1730,8 +1841,12 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
peer_mac_h16, mac_addr);
+ ast_hash = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
+ hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
- peer_id);
+ hw_peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
@@ -1743,6 +1858,7 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
ath12k_htt_pull_ppdu_stats(ab, skb);
break;
case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath12k_debugfs_htt_ext_stats_handler(ab, skb);
break;
case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
ath12k_htt_mlo_offset_event_handler(ab, skb);
@@ -1768,6 +1884,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
+ bool is_continuation;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
@@ -1816,7 +1933,8 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
rem_len = msdu_len - buf_first_len;
while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
rxcb = ATH12K_SKB_RXCB(skb);
- if (rxcb->is_continuation)
+ is_continuation = rxcb->is_continuation;
+ if (is_continuation)
buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
else
buf_len = rem_len;
@@ -1834,7 +1952,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
dev_kfree_skb_any(skb);
rem_len -= buf_len;
- if (!rxcb->is_continuation)
+ if (!is_continuation)
break;
}
@@ -1859,21 +1977,14 @@ static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_
return NULL;
}
-static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
+static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu,
+ struct ath12k_dp_rx_info *rx_info)
{
- struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct ath12k_base *ab = ar->ab;
- bool ip_csum_fail, l4_csum_fail;
-
- ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
- l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
-
- msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
- CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
-static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
- enum hal_encrypt_type enctype)
+int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
@@ -2067,10 +2178,13 @@ static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath12k_base *ab = ar->ab;
size_t hdr_len, crypto_len;
- struct ieee80211_hdr *hdr;
- u16 qos_ctl;
- __le16 fc;
- u8 *crypto_hdr;
+ struct ieee80211_hdr hdr;
+ __le16 qos_ctl;
+ u8 *crypto_hdr, mesh_ctrl;
+
+ ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr);
+ hdr_len = ieee80211_hdrlen(hdr.frame_control);
+ mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc);
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
@@ -2078,27 +2192,21 @@ static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
}
- fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
- hdr_len = ieee80211_hdrlen(fc);
skb_push(msdu, hdr_len);
- hdr = (struct ieee80211_hdr *)msdu->data;
- hdr->frame_control = fc;
-
- /* Get wifi header from rx_desc */
- ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
+ memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));
if (rxcb->is_mcbc)
status->flag &= ~RX_FLAG_PN_VALIDATED;
/* Add QOS header */
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qos_ctl = rxcb->tid;
- if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
- qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data;
- /* TODO: Add other QoS ctl fields when required */
- memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
- &qos_ctl, IEEE80211_QOS_CTL_LEN);
+ qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK);
+ if (mesh_ctrl)
+ qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT);
+
+ memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN);
}
}
@@ -2174,10 +2282,10 @@ static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
}
struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath12k_peer *peer = NULL;
lockdep_assert_held(&ab->base_lock);
@@ -2188,40 +2296,41 @@ ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
if (peer)
return peer;
- if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
- return NULL;
+ if (rx_info->addr2_present)
+ peer = ath12k_peer_find_by_addr(ab, rx_info->addr2);
- peer = ath12k_peer_find_by_addr(ab,
- ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
- rx_desc));
return peer;
}
static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
+ struct ath12k_dp_rx_info *rx_info)
{
- bool fill_crypto_hdr;
struct ath12k_base *ab = ar->ab;
struct ath12k_skb_rxcb *rxcb;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
struct ieee80211_hdr *hdr;
struct ath12k_peer *peer;
+ struct ieee80211_rx_status *rx_status = rx_info->rx_status;
u32 err_bitmap;
/* PN for multicast packets will be checked in mac80211 */
rxcb = ATH12K_SKB_RXCB(msdu);
- fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
- rxcb->is_mcbc = fill_crypto_hdr;
+ rxcb->is_mcbc = rx_info->is_mcbc;
if (rxcb->is_mcbc)
- rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
+ rxcb->peer_id = rx_info->peer_id;
spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+ peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info);
if (peer) {
+ /* Clear the mcbc flag for peers marked ucast_ra_only: such
+ * peers (e.g. a STA sending to an AP) transmit only unicast
+ * frames, so treat the packet as unicast even when the DA
+ * is multicast/broadcast.
+ */
+ rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only;
+
if (rxcb->is_mcbc)
enctype = peer->sec_type_grp;
else
@@ -2250,7 +2359,7 @@ static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
if (is_decrypted) {
rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
- if (fill_crypto_hdr)
+ if (rx_info->is_mcbc)
rx_status->flag |= RX_FLAG_MIC_STRIPPED |
RX_FLAG_ICV_STRIPPED;
else
@@ -2258,37 +2367,28 @@ static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
RX_FLAG_PN_VALIDATED;
}
- ath12k_dp_rx_h_csum_offload(ar, msdu);
+ ath12k_dp_rx_h_csum_offload(msdu, rx_info);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
enctype, rx_status, is_decrypted);
- if (!is_decrypted || fill_crypto_hdr)
+ if (!is_decrypted || rx_info->is_mcbc)
return;
- if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
- DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
}
-static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
+static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
{
- struct ath12k_base *ab = ar->ab;
struct ieee80211_supported_band *sband;
- enum rx_msdu_start_pkt_type pkt_type;
- u8 bw;
- u8 rate_mcs, nss;
- u8 sgi;
+ struct ieee80211_rx_status *rx_status = rx_info->rx_status;
+ enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type;
+ u8 bw = rx_info->bw, sgi = rx_info->sgi;
+ u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss;
bool is_cck;
- pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
- bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
- rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
- nss = ath12k_dp_rx_h_nss(ab, rx_desc);
- sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
-
switch (pkt_type) {
case RX_MSDU_START_PKT_TYPE_11A:
case RX_MSDU_START_PKT_TYPE_11B:
@@ -2337,13 +2437,55 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
break;
+ case RX_MSDU_START_PKT_TYPE_11BE:
+ rx_status->rate_idx = rate_mcs;
+
+ if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in EHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+
+ rx_status->encoding = RX_ENC_EHT;
+ rx_status->nss = nss;
+ rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ break;
+ default:
+ break;
}
}
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
+void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc,
+ struct ath12k_dp_rx_info *rx_info)
{
- struct ath12k_base *ab = ar->ab;
+ rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc);
+ rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc);
+ rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc);
+ rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+ rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
+ rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
+ rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
+ rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
+ rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc);
+ rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc);
+ rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
+ rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+
+ if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) {
+ ether_addr_copy(rx_info->addr2,
+ ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc));
+ rx_info->addr2_present = true;
+ }
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
+ rx_desc, sizeof(*rx_desc));
+}
+
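The snapshot helper above decouples the rest of the rx path from the hardware descriptor: every field is read once into ath12k_dp_rx_info, and the later stages work purely from the cache. The call order used by the process_msdu path further down in this patch:

	ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
	ath12k_dp_rx_h_ppdu(ar, rx_info);		/* band/freq/rate from the cache */
	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info);	/* crypto and undecap */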
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
+{
+ struct ieee80211_rx_status *rx_status = rx_info->rx_status;
u8 channel_num;
u32 center_freq, meta_data;
struct ieee80211_channel *channel;
@@ -2357,12 +2499,14 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
- meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+ meta_data = rx_info->phy_meta_data;
channel_num = meta_data;
center_freq = meta_data >> 16;
- if (center_freq >= 5935 && center_freq <= 7105) {
+ if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_6GHZ_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
+ rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
@@ -2376,19 +2520,18 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
ieee80211_frequency_to_channel(channel->center_freq);
}
spin_unlock_bh(&ar->data_lock);
- ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
- rx_desc, sizeof(*rx_desc));
}
- rx_status->freq = ieee80211_channel_to_frequency(channel_num,
- rx_status->band);
+ if (rx_status->band != NL80211_BAND_6GHZ)
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
- ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
+ ath12k_dp_rx_h_rate(ar, rx_info);
}
static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
static const struct ieee80211_radiotap_he known = {
@@ -2401,6 +2544,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
struct ieee80211_sta *pubsta;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status *status = rx_info->rx_status;
u8 decap = DP_RX_DECAP_TYPE_RAW;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol = rxcb->is_eapol;
@@ -2413,17 +2557,22 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
}
if (!(status->flag & RX_FLAG_ONLY_MONITOR))
- decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
+ decap = rx_info->decap_type;
spin_lock_bh(&ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ab, msdu);
+ peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info);
pubsta = peer ? peer->sta : NULL;
+ if (pubsta && pubsta->valid_links) {
+ status->link_valid = 1;
+ status->link_id = peer->link_id;
+ }
+
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -2434,6 +2583,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
(status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->encoding == RX_ENC_EHT) ? "eht" : "",
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
@@ -2467,10 +2617,33 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
+static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc,
+ struct sk_buff *msdu)
+{
+ struct ieee80211_hdr *hdr;
+ u8 decap_type;
+ u32 hdr_len;
+
+ decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+ if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI)
+ return true;
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))
+ return true;
+
+ ab->device_stats.invalid_rbm++;
+ WARN_ON_ONCE(1);
+ return false;
+}
+
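The validator above guards the native-wifi decap path: hdr_len is derived from the frame's own frame_control field, so it is bounds-checked before anything pushes or pulls msdu->data based on it. Callers in this patch bail out early, e.g.:

	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
		return -EINVAL;	/* drop rather than trust an oversized 802.11 header */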
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct sk_buff_head *msdu_list,
- struct ieee80211_rx_status *rx_status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc, *lrx_desc;
@@ -2525,10 +2698,16 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
}
}
- ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
- ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) {
+ ret = -EINVAL;
+ goto free_out;
+ }
- rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+ ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
+ ath12k_dp_rx_h_ppdu(ar, rx_info);
+ ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info);
+
+ rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
return 0;
@@ -2541,34 +2720,44 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
struct sk_buff_head *msdu_list,
int ring_id)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ieee80211_rx_status rx_status = {0};
struct ath12k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath12k *ar;
- u8 mac_id, pdev_id;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ struct ath12k_dp_rx_info rx_info;
+ u8 hw_link_id, pdev_id;
int ret;
if (skb_queue_empty(msdu_list))
return;
+ rx_info.addr2_present = false;
+ rx_info.rx_status = &rx_status;
+
rcu_read_lock();
while ((msdu = __skb_dequeue(msdu_list))) {
rxcb = ATH12K_SKB_RXCB(msdu);
- mac_id = rxcb->mac_id;
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
- if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
+ hw_link_id = rxcb->hw_link_id;
+ partner_ab = ath12k_ag_to_ab(ag,
+ hw_links[hw_link_id].device_id);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
+ if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}
- ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
+ ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info);
if (ret) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"Unable to process msdu %d", ret);
@@ -2576,31 +2765,61 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
continue;
}
- ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
+ ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
}
rcu_read_unlock();
}
+static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
+ enum ath12k_peer_metadata_version ver,
+ __le32 peer_metadata)
+{
+ switch (ver) {
+ default:
+ ath12k_warn(ab, "Unknown peer metadata version: %d", ver);
+ fallthrough;
+ case ATH12K_PEER_METADATA_V0:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V0_PEER_ID);
+ case ATH12K_PEER_METADATA_V1:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1_PEER_ID);
+ case ATH12K_PEER_METADATA_V1A:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
+ case ATH12K_PEER_METADATA_V1B:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
+ }
+}
+
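The switch above keeps the per-version peer-metadata bitfield layouts in one place; an unknown version warns once and deliberately falls through to the v0 layout. Its single call site appears in the REO process loop below:

	rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
						 mpdu_info->peer_meta_data);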
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
struct hal_reo_dest_ring *desc;
- int num_buffs_reaped = 0;
+ struct ath12k_base *partner_ab;
struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
+ u8 hw_link_id, device_id;
struct hal_srng *srng;
struct sk_buff *msdu;
bool done = false;
- int mac_id;
u64 desc_va;
__skb_queue_head_init(&msdu_list);
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
spin_lock_bh(&srng->lock);
@@ -2609,24 +2828,38 @@ try_again:
ath12k_hal_srng_access_begin(ab, srng);
while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct rx_mpdu_desc *mpdu_info;
+ struct rx_msdu_desc *msdu_info;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
cookie = le32_get_bits(desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_SW_COOKIE);
- mac_id = le32_get_bits(desc->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ hw_link_id = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
le32_to_cpu(desc->buf_va_lo));
desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ if (desc_info->skb) {
+ dev_kfree_skb_any(desc_info->skb);
+ desc_info->skb = NULL;
+ }
+
+ continue;
+ }
+
/* retry manual desc retrieval */
if (!desc_info) {
- desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+ desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+ cookie);
continue;
}
}
@@ -2637,36 +2870,38 @@ try_again:
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&dp->rx_desc_lock);
- list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
- spin_unlock_bh(&dp->rx_desc_lock);
+ list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
+ dma_unmap_single(partner_ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- num_buffs_reaped++;
+ num_buffs_reaped[device_id]++;
+ ab->device_stats.reo_rx[ring_id][ab->device_id]++;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+ ab->device_stats.hal_reo_error[ring_id]++;
continue;
}
- rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ msdu_info = &desc->rx_msdu_info;
+ mpdu_info = &desc->rx_mpdu_info;
+
+ rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
- rxcb->mac_id = mac_id;
- rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
- RX_MPDU_DESC_META_DATA_PEER_ID);
- rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
+ rxcb->hw_link_id = hw_link_id;
+ rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
+ mpdu_info->peer_meta_data);
+ rxcb->tid = le32_get_bits(mpdu_info->info0,
RX_MPDU_DESC_INFO0_TID);
__skb_queue_tail(&msdu_list, msdu);
@@ -2700,7 +2935,17 @@ try_again:
if (!total_msdu_reaped)
goto exit;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
ring_id);
@@ -2711,7 +2956,8 @@ exit:
static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
{
- struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
+ struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
+ frag_timer);
spin_lock_bh(&rx_tid->ab->base_lock);
if (rx_tid->last_frag_no &&
@@ -2740,10 +2986,17 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
peer = ath12k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
return -ENOENT;
}
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
+ return 0;
+ }
+
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
rx_tid->ab = ab;
@@ -2807,6 +3060,7 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
struct ieee80211_key_conf *key_conf;
struct ieee80211_hdr *hdr;
+ struct ath12k_dp_rx_info rx_info;
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
@@ -2817,6 +3071,9 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
return 0;
+ rx_info.addr2_present = false;
+ rx_info.rx_status = rxs;
+
hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
@@ -2843,11 +3100,16 @@ mic_fail:
(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
+ ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info);
+
rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
skb_pull(msdu, hal_rx_desc_sz);
- ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
+ return -EINVAL;
+
+ ath12k_dp_rx_h_ppdu(ar, &rx_info);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
@@ -2969,9 +3231,10 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct hal_srng *srng;
dma_addr_t link_paddr, buf_paddr;
u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
- u32 cookie, hal_rx_desc_sz, dest_ring_info0;
+ u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
int ret;
struct ath12k_rx_desc_info *desc_info;
+ enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
u8 dst_ind;
hal_rx_desc_sz = ab->hal.hal_desc_sz;
@@ -3005,7 +3268,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, buf_paddr))
return -ENOMEM;
@@ -3021,9 +3284,9 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
}
desc_info->skb = defrag_skb;
+ desc_info->in_use = true;
list_del(&desc_info->list);
- list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
spin_unlock_bh(&dp->rx_desc_lock);
ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
@@ -3049,7 +3312,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
cookie,
- HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
+ idle_link_rbm);
mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
@@ -3061,13 +3324,18 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
reo_ent_ring->rx_mpdu_info.peer_meta_data =
reo_dest_ring->rx_mpdu_info.peer_meta_data;
- /* Firmware expects physical address to be filled in queue_addr_lo in
- * the MLO scenario and in case of non MLO peer meta data needs to be
- * filled.
- * TODO: Need to handle for MLO scenario.
- */
- reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
- reo_ent_ring->info0 = le32_encode_bits(dst_ind,
+ if (ab->hw_params->reoq_lut_support) {
+ reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
+ queue_addr_hi = 0;
+ } else {
+ reo_ent_ring->queue_addr_lo =
+ cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned));
+ queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+ }
+
+ reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
+ HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
+ le32_encode_bits(dst_ind,
HAL_REO_ENTR_RING_INFO0_DEST_IND);
reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
@@ -3085,13 +3353,13 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
err_free_desc:
spin_lock_bh(&dp->rx_desc_lock);
- list_del(&desc_info->list);
- list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
+ desc_info->in_use = false;
desc_info->skb = NULL;
+ list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
spin_unlock_bh(&dp->rx_desc_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return ret;
}
@@ -3260,7 +3528,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
goto out_unlock;
}
} else {
- ath12k_dp_rx_link_desc_return(ab, ring_desc,
+ ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
@@ -3272,7 +3540,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
}
spin_unlock_bh(&ab->base_lock);
- del_timer_sync(&rx_tid->frag_timer);
+ timer_delete_sync(&rx_tid->frag_timer);
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, peer_id);
@@ -3304,6 +3572,7 @@ out_unlock:
static int
ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
+ struct list_head *used_list,
bool drop, u32 cookie)
{
struct ath12k_base *ab = ar->ab;
@@ -3323,7 +3592,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
+ cookie);
return -EINVAL;
}
}
@@ -3333,9 +3603,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&ab->dp.rx_desc_lock);
- list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
- spin_unlock_bh(&ab->dp.rx_desc_lock);
+
+ list_add_tail(&desc_info->list, used_list);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ar->ab->dev, rxcb->paddr,
@@ -3353,7 +3622,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
goto exit;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
goto exit;
}
@@ -3372,7 +3641,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
dev_kfree_skb_any(msdu);
- ath12k_dp_rx_link_desc_return(ar->ab, desc,
+ ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
exit:
@@ -3383,7 +3652,10 @@ exit:
int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
int budget)
{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
struct hal_rx_msdu_link *link_desc_va;
@@ -3391,22 +3663,24 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
struct hal_reo_dest_ring *reo_desc;
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, device_id;
u32 desc_bank, num_msdus;
struct hal_srng *srng;
- struct ath12k_dp *dp;
- int mac_id;
struct ath12k *ar;
dma_addr_t paddr;
bool is_frag;
- bool drop = false;
+ bool drop;
int pdev_id;
tot_n_bufs_reaped = 0;
quota = budget;
- dp = &ab->dp;
- reo_except = &dp->reo_except_ring;
- link_desc_banks = dp->link_desc_banks;
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
+ reo_except = &ab->dp.reo_except_ring;
srng = &ab->hal.srng_list[reo_except->ring_id];
@@ -3416,7 +3690,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
while (budget &&
(reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
- ab->soc_stats.err_ring_pkts++;
+ drop = false;
+ ab->device_stats.err_ring_pkts++;
+
ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
&desc_bank);
if (ret) {
@@ -3424,16 +3700,28 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
ret);
continue;
}
+
+ hw_link_id = le32_get_bits(reo_desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
+
+ link_desc_banks = partner_ab->dp.link_desc_banks;
link_desc_va = link_desc_banks[desc_bank].vaddr +
(paddr - link_desc_banks[desc_bank].paddr);
ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
- if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
+ if (rbm != partner_ab->dp.idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
- rbm != ab->hw_params->hal_params->rx_buf_rbm) {
- ab->soc_stats.invalid_rbm++;
+ rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
+ ab->device_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
- ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab,
+ &reo_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_REL_MSDU);
continue;
}
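
With multiple devices per hardware group, the idle-link return buffer manager (RBM) is no longer the single hardcoded CHIP0 constant; each device carries its own dp.idle_link_rbm, which is what the check above consults. A self-contained sketch of that validity test, with invented enum values:

/* Sketch only: enum values are illustrative, not the HAL definitions. */
#include <stdbool.h>

enum fake_rbm {
	FAKE_RBM_SW3_BM = 3,
	FAKE_RBM_CHIP0_IDLE = 12,
	FAKE_RBM_CHIP1_IDLE = 13,
};

struct fake_dev {
	enum fake_rbm idle_link_rbm; /* per-device, not a global constant */
	enum fake_rbm rx_buf_rbm;
};

static bool fake_rbm_is_valid(const struct fake_dev *dev, enum fake_rbm rbm)
{
	return rbm == dev->idle_link_rbm ||
	       rbm == FAKE_RBM_SW3_BM ||
	       rbm == dev->rx_buf_rbm;
}

int main(void)
{
	struct fake_dev dev = { FAKE_RBM_CHIP1_IDLE, FAKE_RBM_SW3_BM };

	/* CHIP0's idle RBM is invalid for a CHIP1 device */
	return fake_rbm_is_valid(&dev, FAKE_RBM_CHIP0_IDLE) ? 1 : 0;
}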
@@ -3443,24 +3731,27 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
/* Process only rx fragments with one msdu per link desc below, and drop
* msdus indicated due to error reasons.
+ * Dynamic fragmentation is not supported by multi-link clients, so drop
+ * buffers that belong to partner devices.
*/
- if (!is_frag || num_msdus > 1) {
+ if (!is_frag || num_msdus > 1 ||
+ partner_ab->device_id != ab->device_id) {
drop = true;
+
/* Return the link desc back to wbm idle list */
- ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab,
+ &reo_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
for (i = 0; i < num_msdus; i++) {
- mac_id = le32_get_bits(reo_desc->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
-
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
-
- if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
- msdu_cookies[i]))
+ if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
+ &rx_desc_used_list[device_id],
+ drop,
+ msdu_cookies[i])) {
+ num_buffs_reaped[device_id]++;
tot_n_bufs_reaped++;
+ }
}
if (tot_n_bufs_reaped >= quota) {
@@ -3476,9 +3767,17 @@ exit:
spin_unlock_bh(&srng->lock);
- rx_ring = &dp->rx_refill_buf_ring;
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, tot_n_bufs_reaped);
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
return tot_n_bufs_reaped;
}
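
The error-ring handler now keeps one used-descriptor list and one reap counter per device, and replenishes each partner device's refill ring exactly once after the ring lock is dropped. A small userspace sketch of that bookkeeping, with invented device ids and counts:

/* Sketch only: models the reap/replenish accounting, not the driver. */
#include <stdio.h>

#define FAKE_MAX_DEVICES 3

int main(void)
{
	int num_buffs_reaped[FAKE_MAX_DEVICES] = { 0 };
	int owner_of_buf[] = { 0, 2, 0, 2, 2 }; /* invented reap input */
	int i, d;

	/* reap phase: attribute each buffer to the device that owns it */
	for (i = 0; i < 5; i++)
		num_buffs_reaped[owner_of_buf[i]]++;

	/* replenish phase: one batched refill per device that reaped */
	for (d = 0; d < FAKE_MAX_DEVICES; d++) {
		if (!num_buffs_reaped[d])
			continue;
		printf("replenish device %d with %d buffers\n",
		       d, num_buffs_reaped[d]);
	}
	return 0;
}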
@@ -3508,7 +3807,7 @@ static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
}
static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status,
+ struct ath12k_dp_rx_info *rx_info,
struct sk_buff_head *msdu_list)
{
struct ath12k_base *ab = ar->ab;
@@ -3561,11 +3860,14 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
}
- ath12k_dp_rx_h_ppdu(ar, desc, status);
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
+ return -EINVAL;
- ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
+ ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
+ ath12k_dp_rx_h_ppdu(ar, rx_info);
+ ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info);
- rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
+ rxcb->tid = rx_info->tid;
/* Note that the caller will have access to the msdu and will complete
* rx with mac80211, so there is no need to clean up amsdu_list.
@@ -3575,17 +3877,17 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
}
static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status,
+ struct ath12k_dp_rx_info *rx_info,
struct sk_buff_head *msdu_list)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
bool drop = false;
- ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+ ar->ab->device_stats.reo_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
- if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+ if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
drop = true;
break;
case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
@@ -3605,8 +3907,8 @@ static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
return drop;
}
-static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
u16 msdu_len;
@@ -3620,20 +3922,33 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
+
+ if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "invalid msdu len in tkip mic err %u\n", msdu_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
+ sizeof(*desc));
+ return true;
+ }
+
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
- ath12k_dp_rx_h_ppdu(ar, desc, status);
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
+ return true;
+
+ ath12k_dp_rx_h_ppdu(ar, rx_info);
- status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
- RX_FLAG_DECRYPTED);
+ rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
ath12k_dp_rx_h_undecap(ar, msdu, desc,
- HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+ HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false);
+ return false;
}
static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
@@ -3641,14 +3956,15 @@ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
bool drop = false;
u32 err_bitmap;
- ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+ ar->ab->device_stats.rxdma_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
- ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
+ drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info);
break;
}
fallthrough;
@@ -3670,14 +3986,18 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ieee80211_rx_status rxs = {0};
+ struct ath12k_dp_rx_info rx_info;
bool drop = true;
+ rx_info.addr2_present = false;
+ rx_info.rx_status = &rxs;
+
switch (rxcb->err_rel_src) {
case HAL_WBM_REL_SRC_MODULE_REO:
- drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+ drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list);
break;
case HAL_WBM_REL_SRC_MODULE_RXDMA:
- drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+ drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info);
break;
default:
/* msdu will get freed */
@@ -3689,31 +4009,40 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
return;
}
- ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
+ ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
}
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
- struct sk_buff_head msdu_list;
+ struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- u8 mac_id;
- int num_buffs_reaped = 0;
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
+ int total_num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, device_id;
int ret, pdev_id;
+ struct hal_rx_desc *msdu_data;
__skb_queue_head_init(&msdu_list);
+ __skb_queue_head_init(&scatter_msdu_list);
- srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
- rx_ring = &dp->rx_refill_buf_ring;
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+ srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
@@ -3737,7 +4066,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
+ err_info.cookie);
continue;
}
}
@@ -3748,16 +4078,27 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&dp->rx_desc_lock);
- list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
- spin_unlock_bh(&dp->rx_desc_lock);
+ device_id = desc_info->device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ dev_kfree_skb_any(msdu);
+
+ /* If the continuation bit was set in the previous record,
+ * clean up the staged scatter_msdu_list.
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+ continue;
+ }
+
+ list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
+ dma_unmap_single(partner_ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- num_buffs_reaped++;
+ num_buffs_reaped[device_id]++;
+ total_num_buffs_reaped++;
if (!err_info.continuation)
budget--;
@@ -3768,47 +4109,111 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
continue;
}
+ msdu_data = (struct hal_rx_desc *)msdu->data;
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
- rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
-
- __skb_queue_tail(&msdu_list, msdu);
-
rxcb->is_first_msdu = err_info.first_msdu;
rxcb->is_last_msdu = err_info.last_msdu;
rxcb->is_continuation = err_info.continuation;
+ rxcb->rx_desc = msdu_data;
+
+ if (err_info.continuation) {
+ __skb_queue_tail(&scatter_msdu_list, msdu);
+ continue;
+ }
+
+ hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
+ msdu_data);
+ if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
+ dev_kfree_skb_any(msdu);
+
+ /* If the continuation bit was set in the previous record,
+ * clean up the staged scatter_msdu_list.
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+ continue;
+ }
+
+ if (!skb_queue_empty(&scatter_msdu_list)) {
+ struct sk_buff *msdu;
+
+ skb_queue_walk(&scatter_msdu_list, msdu) {
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->hw_link_id = hw_link_id;
+ }
+
+ skb_queue_splice_tail_init(&scatter_msdu_list,
+ &msdu_list);
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->hw_link_id = hw_link_id;
+ __skb_queue_tail(&msdu_list, msdu);
}
+ /* If the continuation bit was set in the last record,
+ * clean up the leftover scatter_msdu_list.
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
- if (!num_buffs_reaped)
+ if (!total_num_buffs_reaped)
goto done;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
rcu_read_lock();
while ((msdu = __skb_dequeue(&msdu_list))) {
- mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
- (struct hal_rx_desc *)msdu->data);
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ hw_link_id = rxcb->hw_link_id;
+
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
+ hw_link_id, device_id);
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
- if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
+
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}
+
+ if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
+ device_id = ar->ab->device_id;
+ device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
+ }
+
ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
}
rcu_read_unlock();
done:
- return num_buffs_reaped;
+ return total_num_buffs_reaped;
}
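
The WBM error path stages continuation fragments on scatter_msdu_list until the final fragment of the burst arrives; the source link id read from that last fragment is then applied to the whole burst before it is spliced onto msdu_list, and a bad final fragment discards the stage. A userspace sketch of that flow, with invented record contents:

/* Sketch only: arrays and prints model the skb queue operations. */
#include <stdio.h>

struct fake_rec {
	int continuation; /* models err_info.continuation */
	int link_id;      /* valid only on the final fragment */
};

int main(void)
{
	struct fake_rec in[] = {
		{ 1, -1 }, { 1, -1 }, { 0, 2 }, /* 3-fragment burst, link 2 */
		{ 0, 0 },                       /* single fragment, link 0 */
	};
	int staged = 0, i, j;

	for (i = 0; i < 4; i++) {
		if (in[i].continuation) {
			staged++; /* __skb_queue_tail(&scatter_msdu_list) */
			continue;
		}
		/* tag the staged burst plus this fragment with the link id
		 * carried by the last fragment, then "splice" to msdu_list */
		for (j = 0; j <= staged; j++)
			printf("deliver fragment on link %d\n", in[i].link_id);
		staged = 0;
	}
	return 0;
}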
void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
@@ -3830,7 +4235,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
ath12k_hal_srng_access_begin(ab, srng);
while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
- tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
+ tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
switch (tag) {
case HAL_REO_GET_QUEUE_STATS_STATUS:
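
The REO status ring fix above parses the TLV header with le64_get_bits() because descriptor words are little-endian in host memory. Below is a sketch of that tag extraction for glibc userspace; the shift and mask here are assumptions for illustration, not the real HAL_SRNG_TLV_HDR_TAG layout.

/* Sketch only: models the little-endian bitfield read. */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

#define FAKE_TLV_TAG_SHIFT 1
#define FAKE_TLV_TAG_MASK (0x1ffull << FAKE_TLV_TAG_SHIFT)

static uint64_t fake_tlv_get_tag(uint64_t tl_le)
{
	/* Convert from descriptor (little-endian) to host order first,
	 * which is what le64_get_bits() does and u64_get_bits() did not. */
	uint64_t tl = le64toh(tl_le);

	return (tl & FAKE_TLV_TAG_MASK) >> FAKE_TLV_TAG_SHIFT;
}

int main(void)
{
	uint64_t hdr = htole64(42ull << FAKE_TLV_TAG_SHIFT);

	printf("tag=%llu\n", (unsigned long long)fake_tlv_get_tag(hdr));
	return 0;
}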
@@ -3893,20 +4298,24 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
void ath12k_dp_rx_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ struct dp_srng *srng;
int i;
ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
if (ab->hw_params->rx_mac_buf_ring)
ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
+ if (!ab->hw_params->rxdma1_enable) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ath12k_dp_srng_cleanup(ab, srng);
+ }
}
for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
- ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
ath12k_dp_rxdma_buf_free(ab);
}
@@ -3964,7 +4373,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
- int ret;
+ int ret = 0;
u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
int i;
@@ -3990,7 +4399,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
* and modify the rx_desc struct
*/
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
HAL_RXDMA_BUF,
@@ -4017,7 +4426,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
}
if (ab->hw_params->rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
i, HAL_RXDMA_BUF);
@@ -4049,14 +4458,18 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
ret);
return ret;
}
-
- ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
- ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
- 0, HAL_TX_MONITOR_BUF);
- if (ret) {
- ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
- ret);
- return ret;
+ } else {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id =
+ dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to configure mon_status_refill_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
}
}
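
The monitor status refill rings are configured one per rxdma, aborting on the first failure so callers can unwind. A trivial sketch of that loop shape; setup_one_ring() is a stand-in, not a driver function:

/* Sketch only: models the configure-all-or-fail-fast loop. */
#include <stdio.h>

static int setup_one_ring(int i)
{
	(void)i;
	return 0; /* pretend each ring configures cleanly */
}

static int setup_mon_status_rings(int num_rxdma_per_pdev)
{
	int i, ret;

	for (i = 0; i < num_rxdma_per_pdev; i++) {
		ret = setup_one_ring(i);
		if (ret) {
			fprintf(stderr,
				"failed to configure mon_status_refill_ring%d %d\n",
				i, ret);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	return setup_mon_status_rings(2);
}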
@@ -4072,14 +4485,12 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ struct dp_srng *srng;
int i, ret;
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
- idr_init(&dp->tx_mon_buf_ring.bufs_idr);
- spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
-
ret = ath12k_dp_srng_setup(ab,
&dp->rx_refill_buf_ring.refill_buf_ring,
HAL_RXDMA_BUF, 0, 0,
@@ -4090,7 +4501,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
}
if (ab->hw_params->rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
@@ -4122,14 +4533,22 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
+ } else {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock);
+ }
- ret = ath12k_dp_srng_setup(ab,
- &dp->tx_mon_buf_ring.refill_buf_ring,
- HAL_TX_MONITOR_BUF, 0, 0,
- DP_TX_MONITOR_BUF_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n");
- return ret;
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ret = ath12k_dp_srng_setup(ab, srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, i,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup mon status ring %d\n",
+ i);
+ return ret;
+ }
}
}
@@ -4159,7 +4578,7 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
return ret;
}
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i,
@@ -4170,17 +4589,6 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
i, ret);
return ret;
}
-
- ring_id = dp->tx_mon_dst_ring[i].ring_id;
- ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
- mac_id + i,
- HAL_TX_MONITOR_DST);
- if (ret) {
- ath12k_warn(ab,
- "failed to configure tx_mon_dst_ring %d %d\n",
- i, ret);
- return ret;
- }
}
out:
return 0;
@@ -4212,41 +4620,15 @@ int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
return ret;
}
- /* if rxdma1_enable is false, no need to setup
- * rxdma_mon_desc_ring.
- */
- if (!ar->ab->hw_params->rxdma1_enable)
- return 0;
-
pmon->mon_last_linkdesc_paddr = 0;
pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
spin_lock_init(&pmon->mon_lock);
- return 0;
-}
-
-int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
-{
- /* start reap timer */
- mod_timer(&ab->mon_reap_timer,
- jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
-
- return 0;
-}
-
-int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
-{
- int ret;
-
- if (stop_timer)
- del_timer_sync(&ab->mon_reap_timer);
+ if (!ar->ab->hw_params->rxdma1_enable)
+ return 0;
- /* reap all the monitor related rings */
- ret = ath12k_dp_purge_mon_ring(ab);
- if (ret) {
- ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
- return ret;
- }
+ INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
+ pmon->mon_mpdu = NULL;
return 0;
}
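
pmon->dp_rx_mon_mpdu_list is initialised with INIT_LIST_HEAD(), whose empty state is a head node that points at itself. A userspace model of that invariant (not the <linux/list.h> implementation):

/* Sketch only: self-referencing head means the list is empty. */
#include <stdio.h>

struct fake_list_head {
	struct fake_list_head *prev, *next;
};

static void fake_init_list_head(struct fake_list_head *h)
{
	h->prev = h;
	h->next = h;
}

static int fake_list_empty(const struct fake_list_head *h)
{
	return h->next == h;
}

int main(void)
{
	struct fake_list_head mpdu_list;

	fake_init_list_head(&mpdu_list);
	printf("empty=%d\n", fake_list_empty(&mpdu_list));
	return 0;
}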