Diffstat (limited to 'drivers/net/wireless/ath/ath11k/dp_rx.c')
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.c | 805 ++++++++++++++++++++++++++++++++++++-----------------------
1 file changed, 489 insertions(+), 316 deletions(-)
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index c212a789421e..b9e976ddcbbf 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/ieee80211.h>
@@ -43,6 +44,13 @@ static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
 }
 
 static inline
+bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
+					    struct hal_rx_desc *desc)
+{
+	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
+}
+
+static inline
 u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
 					      struct hal_rx_desc *desc)
 {
@@ -300,10 +308,10 @@ static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
 static void ath11k_dp_service_mon_ring(struct timer_list *t)
 {
-	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
+	struct ath11k_base *ab = timer_container_of(ab, t, mon_reap_timer);
 	int i;
 
-	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
 		ath11k_dp_rx_process_mon_rings(ab, i, NULL,
 					       DP_MON_SERVICE_BUDGET);
 
 	mod_timer(&ab->mon_reap_timer, jiffies +
@@ -316,7 +324,7 @@ static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
 	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
 
 	do {
-		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
 			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
 								 NULL,
 								 DP_MON_SERVICE_BUDGET);
@@ -382,10 +390,10 @@ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
 			goto fail_free_skb;
 
 		spin_lock_bh(&rx_ring->idr_lock);
-		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
-				   rx_ring->bufs_max * 3, GFP_ATOMIC);
+		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
+				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
 		spin_unlock_bh(&rx_ring->idr_lock);
-		if (buf_id < 0)
+		if (buf_id <= 0)
 			goto fail_dma_unmap;
 
 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
@@ -428,7 +436,6 @@ fail_free_skb:
 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
 					 struct dp_rxdma_ring *rx_ring)
 {
-	struct ath11k_pdev_dp *dp = &ar->dp;
 	struct sk_buff *skb;
 	int buf_id;
 
@@ -446,28 +453,6 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
 	idr_destroy(&rx_ring->bufs_idr);
 	spin_unlock_bh(&rx_ring->idr_lock);
 
-	/* if rxdma1_enable is false, mon_status_refill_ring
-	 * isn't setup, so don't clean.
-	 */
-	if (!ar->ab->hw_params.rxdma1_enable)
-		return 0;
-
-	rx_ring = &dp->rx_mon_status_refill_ring[0];
-
-	spin_lock_bh(&rx_ring->idr_lock);
-	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
-		idr_remove(&rx_ring->bufs_idr, buf_id);
-		/* XXX: Understand where internal driver does this dma_unmap
-		 * of rxdma_buffer.
-		 */
-		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
-				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
-		dev_kfree_skb_any(skb);
-	}
-
-	idr_destroy(&rx_ring->bufs_idr);
-	spin_unlock_bh(&rx_ring->idr_lock);
-
 	return 0;
 }
 
@@ -483,7 +468,7 @@ static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
 	rx_ring = &dp->rxdma_mon_buf_ring;
 	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
 
-	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
 		rx_ring = &dp->rx_mon_status_refill_ring[i];
 		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
 	}
@@ -521,7 +506,7 @@ static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
 		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
 	}
 
-	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
 		rx_ring = &dp->rx_mon_status_refill_ring[i];
 		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
 	}
@@ -537,7 +522,7 @@ static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
 
 	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
 
-	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
 		if (ab->hw_params.rx_mac_buf_ring)
 			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
 
@@ -600,7 +585,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
 	}
 
 	if (ar->ab->hw_params.rx_mac_buf_ring) {
-		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
 			ret = ath11k_dp_srng_setup(ar->ab,
 						   &dp->rx_mac_buf_ring[i],
 						   HAL_RXDMA_BUF, 1,
@@ -613,7 +598,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
 		}
 	}
 
-	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
 		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
 					   HAL_RXDMA_DST, 0, dp->mac_id + i,
 					   DP_RXDMA_ERR_DST_RING_SIZE);
@@ -623,7 +608,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
 		}
 	}
 
-	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
 		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
 		ret = ath11k_dp_srng_setup(ar->ab,
 					   srng,
@@ -684,13 +669,18 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
 	struct ath11k_dp *dp = &ab->dp;
 	struct dp_reo_cmd *cmd, *tmp;
 	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+	struct dp_rx_tid *rx_tid;
 
 	spin_lock_bh(&dp->reo_cmd_lock);
 	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
 		list_del(&cmd->list);
-		dma_unmap_single(ab->dev, cmd->data.paddr,
-				 cmd->data.size, DMA_BIDIRECTIONAL);
-		kfree(cmd->data.vaddr);
+		rx_tid = &cmd->data;
+		if (rx_tid->vaddr_unaligned) {
+			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+					     rx_tid->vaddr_unaligned,
+					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+			rx_tid->vaddr_unaligned = NULL;
+		}
 		kfree(cmd);
 	}
 
@@ -698,9 +688,13 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
 				 &dp->reo_cmd_cache_flush_list, list) {
 		list_del(&cmd_cache->list);
 		dp->reo_cmd_cache_flush_count--;
-		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
-				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
-		kfree(cmd_cache->data.vaddr);
+		rx_tid = &cmd_cache->data;
+		if (rx_tid->vaddr_unaligned) {
+			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+					     rx_tid->vaddr_unaligned,
+					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+			rx_tid->vaddr_unaligned = NULL;
+		}
 		kfree(cmd_cache);
 	}
 	spin_unlock_bh(&dp->reo_cmd_lock);
@@ -714,16 +708,18 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
 	if (status != HAL_REO_CMD_SUCCESS)
 		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
 			    rx_tid->tid, status);
-
-	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
-			 DMA_BIDIRECTIONAL);
-	kfree(rx_tid->vaddr);
+	if (rx_tid->vaddr_unaligned) {
+		dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
+				     rx_tid->vaddr_unaligned,
+				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+		rx_tid->vaddr_unaligned = NULL;
+	}
 }
 
 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
 				      struct dp_rx_tid *rx_tid)
 {
-	struct ath11k_hal_reo_cmd cmd = {0};
+	struct ath11k_hal_reo_cmd cmd = {};
 	unsigned long tot_desc_sz, desc_sz;
 	int ret;
 
@@ -753,9 +749,10 @@
 	if (ret) {
 		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
 			   rx_tid->tid, ret);
-		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
+		dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+				     rx_tid->vaddr_unaligned,
+				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+		rx_tid->vaddr_unaligned = NULL;
 	}
 }
 
@@ -805,21 +802,24 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
 		return;
 
 free_desc:
-	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-			 DMA_BIDIRECTIONAL);
-	kfree(rx_tid->vaddr);
+	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+			     rx_tid->vaddr_unaligned,
+			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+	rx_tid->vaddr_unaligned = NULL;
 }
 
 void ath11k_peer_rx_tid_delete(struct ath11k *ar,
 			       struct ath11k_peer *peer, u8 tid)
 {
-	struct ath11k_hal_reo_cmd cmd = {0};
+	struct ath11k_hal_reo_cmd cmd = {};
 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
 	int ret;
 
 	if (!rx_tid->active)
 		return;
 
+	rx_tid->active = false;
+
 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
@@ -828,14 +828,19 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
 				  HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
 				  ath11k_dp_rx_tid_del_func);
 	if (ret) {
-		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
-			   tid, ret);
-		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
+		if (ret != -ESHUTDOWN)
+			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+				   tid, ret);
+		dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
+				     rx_tid->vaddr_unaligned,
+				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+		rx_tid->vaddr_unaligned = NULL;
 	}
 
-	rx_tid->active = false;
+	rx_tid->paddr = 0;
+	rx_tid->paddr_unaligned = 0;
+	rx_tid->size = 0;
+	rx_tid->unaligned_size = 0;
 }
 
 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
@@ -901,7 +906,7 @@ void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
 		rx_tid = &peer->rx_tid[i];
 
 		spin_unlock_bh(&ar->ab->base_lock);
-		del_timer_sync(&rx_tid->frag_timer);
+		timer_delete_sync(&rx_tid->frag_timer);
 		spin_lock_bh(&ar->ab->base_lock);
 
 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
@@ -922,7 +927,7 @@ void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
 
 		spin_unlock_bh(&ar->ab->base_lock);
-		del_timer_sync(&rx_tid->frag_timer);
+		timer_delete_sync(&rx_tid->frag_timer);
 		spin_lock_bh(&ar->ab->base_lock);
 	}
 }
@@ -933,7 +938,7 @@ static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
 					 u32 ba_win_sz, u16 ssn,
 					 bool update_ssn)
 {
-	struct ath11k_hal_reo_cmd cmd = {0};
+	struct ath11k_hal_reo_cmd cmd = {};
 	int ret;
 
 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
@@ -979,9 +984,9 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
 	if (!rx_tid->active)
 		goto unlock_exit;
 
-	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-			 DMA_BIDIRECTIONAL);
-	kfree(rx_tid->vaddr);
+	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
+			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+	rx_tid->vaddr_unaligned = NULL;
 
 	rx_tid->active = false;
 
@@ -996,9 +1001,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
 	struct ath11k_base *ab = ar->ab;
 	struct ath11k_peer *peer;
 	struct dp_rx_tid *rx_tid;
-	u32 hw_desc_sz;
-	u32 *addr_aligned;
-	void *vaddr;
+	u32 hw_desc_sz, *vaddr;
+	void *vaddr_unaligned;
 	dma_addr_t paddr;
 	int ret;
 
@@ -1006,7 +1010,8 @@
 	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
 	if (!peer) {
-		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
+		ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
+			    peer_mac);
 		spin_unlock_bh(&ab->base_lock);
 		return -ENOENT;
 	}
 
@@ -1019,7 +1024,8 @@
 						     ba_win_sz, ssn, true);
 		spin_unlock_bh(&ab->base_lock);
 		if (ret) {
-			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
+			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d",
+				    peer_mac, tid, ret);
 			return ret;
 		}
 
@@ -1027,8 +1033,8 @@
 						     peer_mac, paddr,
 						     tid, 1, ba_win_sz);
 		if (ret)
-			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
-				    tid, ret);
+			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
+				    peer_mac, tid, ret);
 
 		return ret;
 	}
@@ -1044,54 +1050,48 @@
 	else
 		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
 
-	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
-	if (!vaddr) {
+	rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
+	vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
+						DMA_BIDIRECTIONAL, GFP_ATOMIC);
+	if (!vaddr_unaligned) {
 		spin_unlock_bh(&ab->base_lock);
 		return -ENOMEM;
 	}
 
-	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
-	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
-				   ssn, pn_type);
-
-	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
-			       DMA_BIDIRECTIONAL);
-
-	ret = dma_mapping_error(ab->dev, paddr);
-	if (ret) {
-		spin_unlock_bh(&ab->base_lock);
-		goto err_mem_free;
-	}
-
-	rx_tid->vaddr = vaddr;
-	rx_tid->paddr = paddr;
+	rx_tid->vaddr_unaligned = vaddr_unaligned;
+	vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
+	rx_tid->paddr_unaligned = paddr;
+	rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
+			(unsigned long)rx_tid->vaddr_unaligned);
+	ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
 	rx_tid->size = hw_desc_sz;
 	rx_tid->active = true;
 
+	/* After dma_alloc_noncoherent, vaddr is being modified for reo qdesc setup.
+	 * Since these changes are not reflected in the device, driver now needs to
+	 * explicitly call dma_sync_single_for_device.
	 */
+	dma_sync_single_for_device(ab->dev, rx_tid->paddr,
+				   rx_tid->size,
+				   DMA_TO_DEVICE);
 	spin_unlock_bh(&ab->base_lock);
 
-	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
-						     paddr, tid, 1, ba_win_sz);
+	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
+						     tid, 1, ba_win_sz);
 	if (ret) {
-		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
-			    tid, ret);
+		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
+			    peer_mac, tid, ret);
 		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
 	}
 
 	return ret;
-
-err_mem_free:
-	kfree(vaddr);
-
-	return ret;
 }
 
 int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
 			     struct ieee80211_ampdu_params *params)
 {
 	struct ath11k_base *ab = ar->ab;
-	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
+	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
 	int vdev_id = arsta->arvif->vdev_id;
 	int ret;
 
@@ -1109,7 +1109,7 @@ int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
 {
 	struct ath11k_base *ab = ar->ab;
 	struct ath11k_peer *peer;
-	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
+	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
 	int vdev_id = arsta->arvif->vdev_id;
 	dma_addr_t paddr;
 	bool active;
@@ -1157,7 +1157,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
 {
 	struct ath11k *ar = arvif->ar;
 	struct ath11k_base *ab = ar->ab;
-	struct ath11k_hal_reo_cmd cmd = {0};
+	struct ath11k_hal_reo_cmd cmd = {};
 	struct ath11k_peer *peer;
 	struct dp_rx_tid *rx_tid;
 	u8 tid;
@@ -1248,7 +1248,7 @@ static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
 	int cur_user;
 	u16 peer_id;
 
-	ppdu_info = (struct htt_ppdu_stats_info *)data;
+	ppdu_info = data;
 
 	switch (tag) {
 	case HTT_PPDU_STATS_TAG_COMMON:
@@ -1380,9 +1380,6 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
 	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
 	bool is_ampdu = false;
 
-	if (!usr_stats)
-		return;
-
 	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
 		return;
 
@@ -1451,7 +1448,7 @@
 	}
 
 	sta = peer->sta;
-	arsta = (struct ath11k_sta *)sta->drv_priv;
+	arsta = ath11k_sta_to_arsta(sta);
 
 	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
 
@@ -1527,13 +1524,12 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
 {
 	struct htt_ppdu_stats_info *ppdu_info;
 
-	spin_lock_bh(&ar->data_lock);
+	lockdep_assert_held(&ar->data_lock);
+
 	if (!list_empty(&ar->ppdu_stats_info)) {
 		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
-			if (ppdu_info->ppdu_id == ppdu_id) {
-				spin_unlock_bh(&ar->data_lock);
+			if (ppdu_info->ppdu_id == ppdu_id)
 				return ppdu_info;
-			}
 		}
 
 		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
@@ -1545,16 +1541,13 @@
 			kfree(ppdu_info);
 		}
 	}
-	spin_unlock_bh(&ar->data_lock);
 
 	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
 	if (!ppdu_info)
 		return NULL;
 
-	spin_lock_bh(&ar->data_lock);
 	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
 	ar->ppdu_stat_list_depth++;
-	spin_unlock_bh(&ar->data_lock);
 
 	return ppdu_info;
 }
@@ -1578,16 +1571,17 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
 	if (!ar) {
 		ret = -EINVAL;
-		goto exit;
+		goto out;
 	}
 
 	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
 		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
 
+	spin_lock_bh(&ar->data_lock);
 	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
 	if (!ppdu_info) {
 		ret = -EINVAL;
-		goto exit;
+		goto out_unlock_data;
 	}
 
 	ppdu_info->ppdu_id = ppdu_id;
@@ -1596,10 +1590,13 @@
 				     (void *)ppdu_info);
 	if (ret) {
 		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
-		goto exit;
+		goto out_unlock_data;
 	}
 
-exit:
+out_unlock_data:
+	spin_unlock_bh(&ar->data_lock);
+
+out:
 	rcu_read_unlock();
 
 	return ret;
@@ -1613,14 +1610,20 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
 	u8 pdev_id;
 
 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
+
+	rcu_read_lock();
+
 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
 	if (!ar) {
 		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
-		return;
+		goto out;
 	}
 
 	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
 				ar->ab->pktlog_defs_checksum);
+
+out:
+	rcu_read_unlock();
 }
 
 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
@@ -1643,7 +1646,7 @@
 
 	backpressure_time = *data;
 
-	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
+	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
 		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
 
 	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
@@ -1865,8 +1868,7 @@ static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
 }
 
-static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
-				       enum hal_encrypt_type enctype)
+int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
 {
 	switch (enctype) {
 	case HAL_ENCRYPT_TYPE_OPEN:
@@ -2313,7 +2315,7 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
 	u8 bw;
 	u8 rate_mcs, nss;
 	u8 sgi;
-	bool is_cck;
+	bool is_cck, is_ldpc;
 
 	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
 	bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
@@ -2355,6 +2357,9 @@
 		if (sgi)
 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+		is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
+		if (is_ldpc)
+			rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
 		break;
 	case RX_MSDU_START_PKT_TYPE_11AX:
 		rx_status->rate_idx = rate_mcs;
@@ -2397,7 +2402,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
 		rx_status->freq = center_freq;
 	} else if (channel_num >= 1 && channel_num <= 14) {
 		rx_status->band = NL80211_BAND_2GHZ;
-	} else if (channel_num >= 36 && channel_num <= 173) {
+	} else if (channel_num >= 36 && channel_num <= 177) {
 		rx_status->band = NL80211_BAND_5GHZ;
 	} else {
 		spin_lock_bh(&ar->data_lock);
@@ -2455,7 +2460,7 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
 	spin_unlock_bh(&ar->ab->base_lock);
 
 	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
-		   "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
 		   msdu,
 		   msdu->len,
 		   peer ? peer->addr : NULL,
@@ -2488,7 +2493,7 @@
 
 	/* PN for multicast packets are not validate in HW,
 	 * so skip 802.3 rx path
-	 * Also, fast_rx expectes the STA to be authorized, hence
+	 * Also, fast_rx expects the STA to be authorized, hence
 	 * eapol packets are sent in slow path.
	 */
 	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
@@ -2586,7 +2591,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
 {
 	struct sk_buff *msdu;
 	struct ath11k *ar;
-	struct ieee80211_rx_status rx_status = {0};
+	struct ieee80211_rx_status rx_status = {};
 	int ret;
 
 	if (skb_queue_empty(msdu_list))
@@ -2621,7 +2626,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
 {
 	struct ath11k_dp *dp = &ab->dp;
 	struct dp_rxdma_ring *rx_ring;
-	int num_buffs_reaped[MAX_RADIOS] = {0};
+	int num_buffs_reaped[MAX_RADIOS] = {};
 	struct sk_buff_head msdu_list[MAX_RADIOS];
 	struct ath11k_skb_rxcb *rxcb;
 	int total_msdu_reaped = 0;
@@ -2642,9 +2647,9 @@
 
 	spin_lock_bh(&srng->lock);
 
+try_again:
 	ath11k_hal_srng_access_begin(ab, srng);
 
-try_again:
 	while (likely(desc =
 	      (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
 									     srng))) {
@@ -2654,6 +2659,9 @@ try_again:
 				   cookie);
 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
 
+		if (unlikely(buf_id == 0))
+			continue;
+
 		ar = ab->pdevs[mac_id].ar;
 		rx_ring = &ar->dp.rx_refill_buf_ring;
 		spin_lock_bh(&rx_ring->idr_lock);
@@ -2680,7 +2688,7 @@ try_again:
 		if (unlikely(push_reason !=
 			     HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
 			dev_kfree_skb_any(msdu);
-			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+			ab->soc_stats.hal_reo_error[ring_id]++;
 			continue;
 		}
 
@@ -2755,6 +2763,9 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
 	if (!rx_stats)
 		return;
 
+	arsta->rssi_comb = ppdu_info->rssi_comb;
+	ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+
 	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
 		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
 
@@ -2811,8 +2822,6 @@
 	rx_stats->dcm_count += ppdu_info->dcm;
 	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
 
-	arsta->rssi_comb = ppdu_info->rssi_comb;
-
 	BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
 		     ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
 
@@ -2969,11 +2978,52 @@ ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
 	}
 }
 
+static enum dp_mon_status_buf_state
+ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng,
+			  struct dp_rxdma_ring *rx_ring)
+{
+	struct ath11k_skb_rxcb *rxcb;
+	struct hal_tlv_hdr *tlv;
+	struct sk_buff *skb;
+	void *status_desc;
+	dma_addr_t paddr;
+	u32 cookie;
+	int buf_id;
+	u8 rbm;
+
+	status_desc = ath11k_hal_srng_src_next_peek(ab, srng);
+	if (!status_desc)
+		return DP_MON_STATUS_NO_DMA;
+
+	ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
+
+	buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+	spin_lock_bh(&rx_ring->idr_lock);
+	skb = idr_find(&rx_ring->bufs_idr, buf_id);
+	spin_unlock_bh(&rx_ring->idr_lock);
+
+	if (!skb)
+		return DP_MON_STATUS_NO_DMA;
+
+	rxcb = ATH11K_SKB_RXCB(skb);
+	dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+				skb->len + skb_tailroom(skb),
+				DMA_FROM_DEVICE);
+
+	tlv = (struct hal_tlv_hdr *)skb->data;
+	if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE)
+		return DP_MON_STATUS_NO_DMA;
+
+	return DP_MON_STATUS_REPLINISH;
+}
+
 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
 					     int *budget, struct sk_buff_head *skb_list)
 {
 	struct ath11k *ar;
 	const struct ath11k_hw_hal_params *hal_params;
+	enum dp_mon_status_buf_state reap_status;
 	struct ath11k_pdev_dp *dp;
 	struct dp_rxdma_ring *rx_ring;
 	struct ath11k_mon_data *pmon;
@@ -3015,39 +3065,74 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
 
 			spin_lock_bh(&rx_ring->idr_lock);
 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
+			spin_unlock_bh(&rx_ring->idr_lock);
+
 			if (!skb) {
 				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
 					    buf_id);
-				spin_unlock_bh(&rx_ring->idr_lock);
 				pmon->buf_state = DP_MON_STATUS_REPLINISH;
 				goto move_next;
 			}
 
-			idr_remove(&rx_ring->bufs_idr, buf_id);
-			spin_unlock_bh(&rx_ring->idr_lock);
-
 			rxcb = ATH11K_SKB_RXCB(skb);
-			dma_unmap_single(ab->dev, rxcb->paddr,
-					 skb->len + skb_tailroom(skb),
-					 DMA_FROM_DEVICE);
+			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+						skb->len + skb_tailroom(skb),
+						DMA_FROM_DEVICE);
 
 			tlv = (struct hal_tlv_hdr *)skb->data;
 			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
 					HAL_RX_STATUS_BUFFER_DONE) {
-				ath11k_warn(ab, "mon status DONE not set %lx\n",
+				ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
 					    FIELD_GET(HAL_TLV_HDR_TAG,
-						      tlv->tl));
+						      tlv->tl), buf_id);
+				/* RxDMA status done bit might not be set even
+				 * though tp is moved by HW.
				 */
+
+				/* If done status is missing:
+				 * 1. As per MAC team's suggestion,
+				 *    when HP + 1 entry is peeked and if DMA
+				 *    is not done and if HP + 2 entry's DMA done
+				 *    is set. skip HP + 1 entry and
+				 *    start processing in next interrupt.
+				 * 2. If HP + 2 entry's DMA done is not set,
+				 *    poll onto HP + 1 entry DMA done to be set.
+				 *    Check status for same buffer for next time
+				 *    dp_rx_mon_status_srng_process
				 */
+
+				reap_status = ath11k_dp_rx_mon_buf_done(ab, srng,
+									rx_ring);
+				if (reap_status == DP_MON_STATUS_NO_DMA)
+					continue;
+
+				spin_lock_bh(&rx_ring->idr_lock);
+				idr_remove(&rx_ring->bufs_idr, buf_id);
+				spin_unlock_bh(&rx_ring->idr_lock);
+
+				dma_unmap_single(ab->dev, rxcb->paddr,
+						 skb->len + skb_tailroom(skb),
+						 DMA_FROM_DEVICE);
+
 				dev_kfree_skb_any(skb);
-				pmon->buf_state = DP_MON_STATUS_NO_DMA;
+				pmon->buf_state = DP_MON_STATUS_REPLINISH;
 				goto move_next;
 			}
 
+			spin_lock_bh(&rx_ring->idr_lock);
+			idr_remove(&rx_ring->bufs_idr, buf_id);
+			spin_unlock_bh(&rx_ring->idr_lock);
+
 			if (ab->hw_params.full_monitor_mode) {
 				ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
 				if (paddr == pmon->mon_status_paddr)
 					pmon->buf_state = DP_MON_STATUS_MATCH;
 			}
+
+			dma_unmap_single(ab->dev, rxcb->paddr,
+					 skb->len + skb_tailroom(skb),
+					 DMA_FROM_DEVICE);
+
 			__skb_queue_tail(skb_list, skb);
 		} else {
 			pmon->buf_state = DP_MON_STATUS_REPLINISH;
@@ -3080,82 +3165,10 @@ move_next:
 	return num_buffs_reaped;
 }
 
-int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
-				    struct napi_struct *napi, int budget)
-{
-	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
-	enum hal_rx_mon_status hal_status;
-	struct sk_buff *skb;
-	struct sk_buff_head skb_list;
-	struct hal_rx_mon_ppdu_info ppdu_info;
-	struct ath11k_peer *peer;
-	struct ath11k_sta *arsta;
-	int num_buffs_reaped = 0;
-	u32 rx_buf_sz;
-	u16 log_type = 0;
-
-	__skb_queue_head_init(&skb_list);
-
-	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
-							     &skb_list);
-	if (!num_buffs_reaped)
-		goto exit;
-
-	memset(&ppdu_info, 0, sizeof(ppdu_info));
-	ppdu_info.peer_id = HAL_INVALID_PEERID;
-
-	while ((skb = __skb_dequeue(&skb_list))) {
-		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
-			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
-			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
-		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
-			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
-			rx_buf_sz = DP_RX_BUFFER_SIZE;
-		}
-
-		if (log_type)
-			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
-
-		hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
-
-		if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
-		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
-			dev_kfree_skb_any(skb);
-			continue;
-		}
-
-		rcu_read_lock();
-		spin_lock_bh(&ab->base_lock);
-		peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
-
-		if (!peer || !peer->sta) {
-			ath11k_dbg(ab, ATH11K_DBG_DATA,
-				   "failed to find the peer with peer_id %d\n",
-				   ppdu_info.peer_id);
-			goto next_skb;
-		}
-
-		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
-		ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
-
-		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
-			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
-
-next_skb:
-		spin_unlock_bh(&ab->base_lock);
-		rcu_read_unlock();
-
-		dev_kfree_skb_any(skb);
-		memset(&ppdu_info, 0, sizeof(ppdu_info));
-		ppdu_info.peer_id = HAL_INVALID_PEERID;
-	}
-exit:
-	return num_buffs_reaped;
-}
-
 static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
 {
-	struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
+	struct dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
						      frag_timer);
 
 	spin_lock_bh(&rx_tid->ab->base_lock);
 	if (rx_tid->last_frag_no &&
@@ -3176,8 +3189,11 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
 	int i;
 
 	tfm = crypto_alloc_shash("michael_mic", 0, 0);
-	if (IS_ERR(tfm))
+	if (IS_ERR(tfm)) {
+		ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
+			    PTR_ERR(tfm));
 		return PTR_ERR(tfm);
+	}
 
 	spin_lock_bh(&ab->base_lock);
 
@@ -3185,6 +3201,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
 	if (!peer) {
 		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
 		spin_unlock_bh(&ab->base_lock);
+		crypto_free_shash(tfm);
 		return -ENOENT;
 	}
 
@@ -3196,6 +3213,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
 	}
 
 	peer->tfm_mmic = tfm;
+	peer->dp_setup_done = true;
 	spin_unlock_bh(&ab->base_lock);
 
 	return 0;
@@ -3206,7 +3224,7 @@ static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
 				      size_t data_len, u8 *mic)
 {
 	SHASH_DESC_ON_STACK(desc, tfm);
-	u8 mic_hdr[16] = {0};
+	u8 mic_hdr[16] = {};
 	u8 tid = 0;
 	int ret;
 
@@ -3462,7 +3480,7 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti
 	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
 					ab->hw_params.hal_params->rx_buf_rbm);
 
-	/* Fill mpdu details into reo entrace ring */
+	/* Fill mpdu details into reo entrance ring */
 	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
 
 	spin_lock_bh(&srng->lock);
@@ -3641,6 +3659,13 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
 		ret = -ENOENT;
 		goto out_unlock;
 	}
+	if (!peer->dp_setup_done) {
+		ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
+			    peer->addr, peer_id);
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
 	rx_tid = &peer->rx_tid[tid];
 
 	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
@@ -3656,7 +3681,7 @@
 		goto out_unlock;
 	}
 
-	if (frag_no > __fls(rx_tid->rx_frag_bitmap))
+	if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
 		__skb_queue_tail(&rx_tid->rx_frags, msdu);
 	else
 		ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
@@ -3686,7 +3711,7 @@
 	}
 	spin_unlock_bh(&ab->base_lock);
 
-	del_timer_sync(&rx_tid->frag_timer);
+	timer_delete_sync(&rx_tid->frag_timer);
 	spin_lock_bh(&ab->base_lock);
 
 	peer = ath11k_peer_find_by_id(ab, peer_id);
@@ -3793,7 +3818,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
 	struct dp_link_desc_bank *link_desc_banks;
 	enum hal_rx_buf_return_buf_manager rbm;
 	int tot_n_bufs_reaped, quota, ret, i;
-	int n_bufs_reaped[MAX_RADIOS] = {0};
+	int n_bufs_reaped[MAX_RADIOS] = {};
 	struct dp_rxdma_ring *rx_ring;
 	struct dp_srng *reo_except;
 	u32 desc_bank, num_msdus;
@@ -3837,6 +3862,7 @@
 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
 						 &rbm);
 		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+		    rbm != HAL_RX_BUF_RBM_SW1_BM &&
 		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
 			ab->soc_stats.invalid_rbm++;
 			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
@@ -4073,7 +4099,7 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
 				 struct sk_buff_head *msdu_list)
 {
 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
-	struct ieee80211_rx_status rxs = {0};
+	struct ieee80211_rx_status rxs = {};
 	bool drop = true;
 
 	switch (rxcb->err_rel_src) {
@@ -4109,7 +4135,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
 	struct ath11k_skb_rxcb *rxcb;
 	u32 *rx_desc;
 	int buf_id, mac_id;
-	int num_buffs_reaped[MAX_RADIOS] = {0};
+	int num_buffs_reaped[MAX_RADIOS] = {};
 	int total_num_buffs_reaped = 0;
 	int ret, i;
 
@@ -4419,7 +4445,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
 	}
 
 	if (ab->hw_params.rx_mac_buf_ring) {
-		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
 			ring_id = dp->rx_mac_buf_ring[i].ring_id;
 			ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
 							  mac_id + i, HAL_RXDMA_BUF);
@@ -4431,7 +4457,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
 		}
 	}
 
-	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
 		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
 		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
 						  mac_id + i, HAL_RXDMA_DST);
@@ -4471,7 +4497,7 @@
 	}
 
 config_refill_ring:
-	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
 		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
 		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
 						  HAL_RXDMA_MONITOR_STATUS);
@@ -4521,8 +4547,7 @@ int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
 	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
 
 	if (src_srng_desc) {
-		struct ath11k_buffer_addr *src_desc =
-			(struct ath11k_buffer_addr *)src_srng_desc;
+		struct ath11k_buffer_addr *src_desc = src_srng_desc;
 
 		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
 	} else {
@@ -4541,8 +4566,7 @@ void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
 					 u8 *rbm,
 					 void **pp_buf_addr_info)
 {
-	struct hal_rx_msdu_link *msdu_link =
-		(struct hal_rx_msdu_link *)rx_msdu_link_desc;
+	struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc;
 	struct ath11k_buffer_addr *buf_addr_info;
 
 	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
@@ -4583,7 +4607,7 @@ static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
 	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
 	u8 tmp = 0;
 
-	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
+	msdu_link = msdu_link_desc;
 	msdu_details = &msdu_link->msdu_link[0];
 
 	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
@@ -4591,7 +4615,6 @@ static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
 			    msdu_details[i].buf_addr_info.info0) == 0) {
 			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
 			msdu_desc_info->info0 |= last;
-			;
 			break;
 		}
 		msdu_desc_info = &msdu_details[i].rx_msdu_info;
@@ -4657,11 +4680,12 @@ static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
 	}
 }
 
-static u32
-ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
-			  void *ring_entry, struct sk_buff **head_msdu,
-			  struct sk_buff **tail_msdu, u32 *npackets,
-			  u32 *ppdu_id)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
+			      void *ring_entry, struct sk_buff **head_msdu,
+			      struct sk_buff **tail_msdu, u32 *npackets,
+			      u32 *ppdu_id)
 {
 	struct ath11k_pdev_dp *dp = &ar->dp;
 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
@@ -4680,8 +4704,7 @@
 	bool is_frag, is_first_msdu;
 	bool drop_mpdu = false;
 	struct ath11k_skb_rxcb *rxcb;
-	struct hal_reo_entrance_ring *ent_desc =
-		(struct hal_reo_entrance_ring *)ring_entry;
+	struct hal_reo_entrance_ring *ent_desc = ring_entry;
 	int buf_id;
 	u32 rx_link_buf_info[2];
 	u8 rbm;
@@ -4749,7 +4772,7 @@
 			if (!msdu) {
 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
 					   "msdu_pop: invalid buf_id %d\n", buf_id);
-				break;
+				goto next_msdu;
 			}
 			rxcb = ATH11K_SKB_RXCB(msdu);
 			if (!rxcb->unmapped) {
@@ -4870,7 +4893,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
 {
 	struct ath11k_base *ab = ar->ab;
 	struct sk_buff *msdu, *prev_buf;
-	u32 wifi_hdr_len;
 	struct hal_rx_desc *rx_desc;
 	char *hdr_desc;
 	u8 *dest, decap_format;
@@ -4912,38 +4934,27 @@
 		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
 	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
-		__le16 qos_field;
 		u8 qos_pkt = 0;
 
 		rx_desc = (struct hal_rx_desc *)head_msdu->data;
 		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
 
 		/* Base size */
-		wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
 		wh = (struct ieee80211_hdr_3addr *)hdr_desc;
 
-		if (ieee80211_is_data_qos(wh->frame_control)) {
-			struct ieee80211_qos_hdr *qwh =
-					(struct ieee80211_qos_hdr *)hdr_desc;
-
-			qos_field = qwh->qos_ctrl;
+		if (ieee80211_is_data_qos(wh->frame_control))
 			qos_pkt = 1;
-		}
+
 		msdu = head_msdu;
 
 		while (msdu) {
-			rx_desc = (struct hal_rx_desc *)msdu->data;
-			hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
-
+			ath11k_dp_rx_msdus_set_payload(ar, msdu);
 			if (qos_pkt) {
 				dest = skb_push(msdu, sizeof(__le16));
 				if (!dest)
 					goto err_merge_fail;
-				memcpy(dest, hdr_desc, wifi_hdr_len);
-				memcpy(dest + wifi_hdr_len,
-				       (u8 *)&qos_field, sizeof(__le16));
+				memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
 			}
-			ath11k_dp_rx_msdus_set_payload(ar, msdu);
 			prev_buf = msdu;
 			msdu = msdu->next;
 		}
@@ -4952,7 +4963,7 @@
 			goto err_merge_fail;
 
 		ath11k_dbg(ab, ATH11K_DBG_DATA,
-			   "mpdu_buf %pK mpdu_buf->len %u",
+			   "mpdu_buf %p mpdu_buf->len %u",
 			   prev_buf, prev_buf->len);
 	} else {
 		ath11k_dbg(ab, ATH11K_DBG_DATA,
@@ -4967,8 +4978,98 @@ err_merge_fail:
 	return NULL;
 }
 
+static void
+ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
+				u8 *rtap_buf)
+{
+	u32 rtap_len = 0;
+
+	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
+	rtap_len += 2;
+
+	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
+	rtap_len += 2;
+
+	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
+	rtap_len += 2;
+
+	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
+	rtap_len += 2;
+
+	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
+	rtap_len += 2;
+
+	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
+}
+
+static void
+ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
+				   u8 *rtap_buf)
+{
+	u32 rtap_len = 0;
+
+	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
+	rtap_len += 2;
+
+	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
+	rtap_len += 2;
+
+	rtap_buf[rtap_len] = rx_status->he_RU[0];
+	rtap_len += 1;
+
+	rtap_buf[rtap_len] = rx_status->he_RU[1];
+	rtap_len += 1;
+
+	rtap_buf[rtap_len] = rx_status->he_RU[2];
+	rtap_len += 1;
+
+	rtap_buf[rtap_len] = rx_status->he_RU[3];
+}
+
+static void ath11k_update_radiotap(struct ath11k *ar,
+				   struct hal_rx_mon_ppdu_info *ppduinfo,
+				   struct sk_buff *mon_skb,
+				   struct ieee80211_rx_status *rxs)
+{
+	struct ieee80211_supported_band *sband;
+	u8 *ptr = NULL;
+
+	rxs->flag |= RX_FLAG_MACTIME_START;
+	rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+
+	if (ppduinfo->nss)
+		rxs->nss = ppduinfo->nss;
+
+	if (ppduinfo->he_mu_flags) {
+		rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
+		rxs->encoding = RX_ENC_HE;
+		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
+		ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
+	} else if (ppduinfo->he_flags) {
+		rxs->flag |= RX_FLAG_RADIOTAP_HE;
+		rxs->encoding = RX_ENC_HE;
+		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
+		ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
+		rxs->rate_idx = ppduinfo->rate;
+	} else if (ppduinfo->vht_flags) {
+		rxs->encoding = RX_ENC_VHT;
+		rxs->rate_idx = ppduinfo->rate;
+	} else if (ppduinfo->ht_flags) {
+		rxs->encoding = RX_ENC_HT;
+		rxs->rate_idx = ppduinfo->rate;
+	} else {
+		rxs->encoding = RX_ENC_LEGACY;
+		sband = &ar->mac.sbands[rxs->band];
+		rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
+							  ppduinfo->cck_flag);
+	}
+
+	rxs->mactime = ppduinfo->tsft;
+}
+
 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
 				    struct sk_buff *head_msdu,
+				    struct hal_rx_mon_ppdu_info *ppduinfo,
 				    struct sk_buff *tail_msdu,
 				    struct napi_struct *napi)
 {
@@ -5004,6 +5105,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
 		}
 		rxs->flag |= RX_FLAG_ONLY_MONITOR;
+		ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
 
 		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
 		mon_skb = skb_next;
@@ -5022,6 +5124,12 @@ mon_deliver_fail:
 	return -EINVAL;
 }
 
+/* The destination ring processing is stuck if the destination is not
+ * moving while status ring moves 16 PPDU. The destination ring processing
+ * skips this destination ring PPDU as a workaround.
 */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
 					  u32 quota, struct napi_struct *napi)
 {
@@ -5029,12 +5137,13 @@
 	struct ath11k_pdev_dp *dp = &ar->dp;
 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
 	const struct ath11k_hw_hal_params *hal_params;
 	void *ring_entry;
-	void *mon_dst_srng;
+	struct hal_srng *mon_dst_srng;
 	u32 ppdu_id;
 	u32 rx_bufs_used;
 	u32 ring_id;
 	struct ath11k_pdev_mon_stats *rx_mon_stats;
 	u32 npackets = 0;
+	u32 mpdu_rx_bufs_used;
 
 	if (ar->ab->hw_params.rxdma1_enable)
 		ring_id = dp->rxdma_mon_dst_ring.ring_id;
@@ -5043,15 +5152,9 @@
 
 	mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
 
-	if (!mon_dst_srng) {
-		ath11k_warn(ar->ab,
-			    "HAL Monitor Destination Ring Init Failed -- %pK",
-			    mon_dst_srng);
-		return;
-	}
-
 	spin_lock_bh(&pmon->mon_lock);
 
+	spin_lock_bh(&mon_dst_srng->lock);
 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
 
 	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
@@ -5064,20 +5167,44 @@
 		head_msdu = NULL;
 		tail_msdu = NULL;
 
-		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
-							  &head_msdu,
-							  &tail_msdu,
-							  &npackets, &ppdu_id);
+		mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+							      &head_msdu,
+							      &tail_msdu,
+							      &npackets, &ppdu_id);
+
+		rx_bufs_used += mpdu_rx_bufs_used;
+
+		if (mpdu_rx_bufs_used) {
+			dp->mon_dest_ring_stuck_cnt = 0;
+		} else {
+			dp->mon_dest_ring_stuck_cnt++;
+			rx_mon_stats->dest_mon_not_reaped++;
+		}
+
+		if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+			rx_mon_stats->dest_mon_stuck++;
+			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+				   "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+				   pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+				   dp->mon_dest_ring_stuck_cnt,
+				   rx_mon_stats->dest_mon_not_reaped,
+				   rx_mon_stats->dest_mon_stuck);
+			pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+			continue;
+		}
 
 		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
-				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
-				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
+				   "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+				   ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+				   rx_mon_stats->dest_mon_not_reaped,
+				   rx_mon_stats->dest_mon_stuck);
 			break;
 		}
+
 		if (head_msdu && tail_msdu) {
 			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
+						 &pmon->mon_ppdu_info,
 						 tail_msdu, napi);
 			rx_mon_stats->dest_mpdu_done++;
 		}
@@ -5086,6 +5213,7 @@
 						mon_dst_srng);
 	}
 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+	spin_unlock_bh(&mon_dst_srng->lock);
 
 	spin_unlock_bh(&pmon->mon_lock);
 
@@ -5106,36 +5234,96 @@
 	}
 }
 
-static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
-						int mac_id, u32 quota,
-						struct napi_struct *napi)
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+				    struct napi_struct *napi, int budget)
 {
-	struct ath11k_pdev_dp *dp = &ar->dp;
-	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
-	struct hal_rx_mon_ppdu_info *ppdu_info;
-	struct sk_buff *status_skb;
-	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
-	struct ath11k_pdev_mon_stats *rx_mon_stats;
+	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
+	enum hal_rx_mon_status hal_status;
+	struct sk_buff *skb;
+	struct sk_buff_head skb_list;
+	struct ath11k_peer *peer;
+	struct ath11k_sta *arsta;
+	int num_buffs_reaped = 0;
+	u32 rx_buf_sz;
+	u16 log_type;
+	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
+	struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
 
-	ppdu_info = &pmon->mon_ppdu_info;
-	rx_mon_stats = &pmon->rx_mon_stats;
+	__skb_queue_head_init(&skb_list);
 
-	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
-		return;
+	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
+							     &skb_list);
+	if (!num_buffs_reaped)
+		goto exit;
 
-	while (!skb_queue_empty(&pmon->rx_status_q)) {
-		status_skb = skb_dequeue(&pmon->rx_status_q);
+	memset(ppdu_info, 0, sizeof(*ppdu_info));
+	ppdu_info->peer_id = HAL_INVALID_PEERID;
 
-		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
-							    status_skb);
-		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
+	while ((skb = __skb_dequeue(&skb_list))) {
+		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
+			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
+			rx_buf_sz = DP_RX_BUFFER_SIZE;
+		} else {
+			log_type = ATH11K_PKTLOG_TYPE_INVALID;
+			rx_buf_sz = 0;
+		}
+
+		if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
+			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+		memset(ppdu_info, 0, sizeof(*ppdu_info));
+		ppdu_info->peer_id = HAL_INVALID_PEERID;
+		hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
+
+		if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
 			rx_mon_stats->status_ppdu_done++;
 			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
-			ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
-			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+			if (!ab->hw_params.full_monitor_mode) {
+				ath11k_dp_rx_mon_dest_process(ar, mac_id,
+							      budget, napi);
+				pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+			}
+		}
+
+		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+
+		rcu_read_lock();
+		spin_lock_bh(&ab->base_lock);
+		peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+		if (!peer || !peer->sta) {
+			ath11k_dbg(ab, ATH11K_DBG_DATA,
+				   "failed to find the peer with peer_id %d\n",
+				   ppdu_info->peer_id);
+			goto next_skb;
 		}
-		dev_kfree_skb_any(status_skb);
+
+		arsta = ath11k_sta_to_arsta(peer->sta);
+		ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
+
+		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
+			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+next_skb:
+		spin_unlock_bh(&ab->base_lock);
+		rcu_read_unlock();
+
+		dev_kfree_skb_any(skb);
+		memset(ppdu_info, 0, sizeof(*ppdu_info));
+		ppdu_info->peer_id = HAL_INVALID_PEERID;
 	}
+exit:
+	return num_buffs_reaped;
 }
 
 static u32
@@ -5213,7 +5401,7 @@ ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
 				   "full mon msdu_pop: invalid buf_id %d\n",
 				   buf_id);
 			spin_unlock_bh(&rx_ring->idr_lock);
-			break;
+			goto next_msdu;
 		}
 		idr_remove(&rx_ring->bufs_idr, buf_id);
 		spin_unlock_bh(&rx_ring->idr_lock);
@@ -5352,6 +5540,7 @@ static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
 		tail_msdu = mon_mpdu->tail;
 		if (head_msdu && tail_msdu) {
 			ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
+						       &pmon->mon_ppdu_info,
 						       tail_msdu, napi);
 			rx_mon_stats->dest_mpdu_done++;
 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
@@ -5414,7 +5603,7 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
 	struct hal_sw_mon_ring_entries *sw_mon_entries;
 	struct ath11k_pdev_mon_stats *rx_mon_stats;
 	struct sk_buff *head_msdu, *tail_msdu;
-	void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+	struct hal_srng *mon_dst_srng;
 	void *ring_entry;
 	u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
 	int quota = 0, ret;
@@ -5430,6 +5619,9 @@
 		goto reap_status_ring;
 	}
 
+	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+	spin_lock_bh(&mon_dst_srng->lock);
+
 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
 	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
 		head_msdu = NULL;
@@ -5473,6 +5665,7 @@ next_entry:
 	}
 
 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+	spin_unlock_bh(&mon_dst_srng->lock);
 	spin_unlock_bh(&pmon->mon_lock);
 
 	if (rx_bufs_used) {
@@ -5489,22 +5682,6 @@ reap_status_ring:
 	return quota;
 }
 
-static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
-				    struct napi_struct *napi, int budget)
-{
-	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
-	struct ath11k_pdev_dp *dp = &ar->dp;
-	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
-	int num_buffs_reaped = 0;
-
-	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
-							     &pmon->rx_status_q);
-	if (num_buffs_reaped)
-		ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);
-
-	return num_buffs_reaped;
-}
-
 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
 				   struct napi_struct *napi, int budget)
 {
@@ -5514,8 +5691,6 @@
 	if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
 	    ab->hw_params.full_monitor_mode)
 		ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
-	else if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
-		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
 	else
 		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
 
@@ -5527,8 +5702,6 @@ static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
 	struct ath11k_pdev_dp *dp = &ar->dp;
 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
 
-	skb_queue_head_init(&pmon->rx_status_q);
-
 	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
 
 	memset(&pmon->rx_mon_stats, 0,
@@ -5608,7 +5781,7 @@ int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
 	int ret;
 
 	if (stop_timer)
-		del_timer_sync(&ab->mon_reap_timer);
+		timer_delete_sync(&ab->mon_reap_timer);
 
 	/* reap all the monitor related rings */
 	ret = ath11k_dp_purge_mon_ring(ab);
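
Note on the REO queue-descriptor change in ath11k_peer_rx_tid_setup(): the patch replaces kzalloc() + dma_map_single() with dma_alloc_noncoherent() on an over-sized buffer, keeps both the unaligned and the aligned CPU/DMA addresses, and syncs the CPU-written descriptor toward the device. The sketch below restates that pattern in isolation; it is a minimal illustration under assumed names (reo_qdesc_alloc, struct reo_qdesc, and REO_QDESC_ALIGN standing in for HAL_LINK_DESC_ALIGN), not ath11k API.

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

#define REO_QDESC_ALIGN 128	/* illustrative stand-in for HAL_LINK_DESC_ALIGN */

/* Keep both views of the buffer: the unaligned pair is what
 * dma_free_noncoherent() needs later, the aligned pair is what the
 * CPU writes and the device reads.
 */
struct reo_qdesc {
	void *vaddr_unaligned;		/* as returned by dma_alloc_noncoherent() */
	void *vaddr;			/* aligned; descriptor is written here */
	dma_addr_t paddr_unaligned;
	dma_addr_t paddr;		/* aligned; programmed to the device */
	size_t unaligned_size;
	size_t size;
};

static int reo_qdesc_alloc(struct device *dev, struct reo_qdesc *qd,
			   size_t desc_sz)
{
	/* over-allocate so an aligned descriptor always fits */
	qd->size = desc_sz;
	qd->unaligned_size = desc_sz + REO_QDESC_ALIGN - 1;
	qd->vaddr_unaligned = dma_alloc_noncoherent(dev, qd->unaligned_size,
						    &qd->paddr_unaligned,
						    DMA_BIDIRECTIONAL, GFP_ATOMIC);
	if (!qd->vaddr_unaligned)
		return -ENOMEM;

	qd->vaddr = PTR_ALIGN(qd->vaddr_unaligned, REO_QDESC_ALIGN);
	/* offset the DMA address by the same amount as the CPU pointer */
	qd->paddr = qd->paddr_unaligned + (qd->vaddr - qd->vaddr_unaligned);
	return 0;
}

/* Usage sketch: fill the descriptor through qd.vaddr, then flush the CPU
 * writes before the device reads them (non-coherent memory is not snooped):
 *
 *	dma_sync_single_for_device(dev, qd.paddr, qd.size, DMA_TO_DEVICE);
 */

Freeing goes through dma_free_noncoherent(dev, qd.unaligned_size, qd.vaddr_unaligned, qd.paddr_unaligned, DMA_BIDIRECTIONAL), which is why the patch stores vaddr_unaligned, paddr_unaligned, and unaligned_size in struct dp_rx_tid and why every former dma_unmap_single()/kfree() pair in the teardown paths above now calls dma_free_noncoherent() instead.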
