Diffstat (limited to 'drivers/net/wireless/ath/ath12k/dp.c')
-rw-r--r-- | drivers/net/wireless/ath/ath12k/dp.c | 291
1 file changed, 181 insertions, 110 deletions
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 50c36e6ea102..f893fce6d9bd 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -101,7 +101,7 @@ peer_clean:
         return -ENOENT;
     }
 
-    for (; tid >= 0; tid--)
+    for (tid--; tid >= 0; tid--)
         ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
 
     spin_unlock_bh(&ab->base_lock);
@@ -168,6 +168,8 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
         grp_mask = &ab->hw_params->ring_mask->reo_status[0];
         break;
     case HAL_RXDMA_MONITOR_STATUS:
+        grp_mask = &ab->hw_params->ring_mask->rx_mon_status[0];
+        break;
     case HAL_RXDMA_MONITOR_DST:
         grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
         break;
@@ -239,7 +241,7 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
                          enum hal_ring_type type, int ring_num,
                          int mac_id, int num_entries)
 {
-    struct hal_srng_params params = { 0 };
+    struct hal_srng_params params = {};
     int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
     int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
     int ret;
@@ -274,12 +276,17 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
         break;
     case HAL_RXDMA_BUF:
     case HAL_RXDMA_MONITOR_BUF:
-    case HAL_RXDMA_MONITOR_STATUS:
         params.low_threshold = num_entries >> 3;
         params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
         params.intr_batch_cntr_thres_entries = 0;
         params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
         break;
+    case HAL_RXDMA_MONITOR_STATUS:
+        params.low_threshold = num_entries >> 3;
+        params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+        params.intr_batch_cntr_thres_entries = 1;
+        params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+        break;
     case HAL_TX_MONITOR_DST:
         params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
         params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
@@ -354,7 +361,10 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
                   u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
 
     /* only valid if idx_lookup_override is not set in tcl_data_cmd */
-    bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+    if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+        bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+    else
+        bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
 
     bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
                                    HAL_TX_BANK_CONFIG_ADDRX_EN) |
@@ -510,7 +520,7 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
         ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
                                    HAL_WBM2SW_RELEASE,
                                    tx_comp_ring_num, 0,
-                                   DP_TX_COMP_RING_SIZE);
+                                   DP_TX_COMP_RING_SIZE(ab));
         if (ret) {
             ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
                         tx_comp_ring_num, ret);
@@ -919,6 +929,25 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
             goto done;
     }
 
+    if (ab->hw_params->ring_mask->rx_mon_status[grp_id]) {
+        ring_mask = ab->hw_params->ring_mask->rx_mon_status[grp_id];
+        for (i = 0; i < ab->num_radios; i++) {
+            for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+                int id = i * ab->hw_params->num_rxdma_per_pdev + j;
+
+                if (ring_mask & BIT(id)) {
+                    work_done =
+                    ath12k_dp_mon_process_ring(ab, id, napi, budget,
+                                               0);
+                    budget -= work_done;
+                    tot_work_done += work_done;
+                    if (budget <= 0)
+                        goto done;
+                }
+            }
+        }
+    }
+
     if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
         monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
         ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
@@ -982,11 +1011,6 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab)
 {
     int i;
 
-    if (!ab->mon_reap_timer.function)
-        return;
-
-    timer_delete_sync(&ab->mon_reap_timer);
-
     for (i = 0; i < ab->num_radios; i++)
         ath12k_dp_rx_pdev_free(ab, i);
 }
@@ -1024,27 +1048,6 @@ void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
         ab->hal_rx_ops->rx_desc_get_desc_size();
 }
 
-static void ath12k_dp_service_mon_ring(struct timer_list *t)
-{
-    struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
-    int i;
-
-    for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
-        ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
-                                   ATH12K_DP_RX_MONITOR_MODE);
-
-    mod_timer(&ab->mon_reap_timer, jiffies +
-              msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
-}
-
-static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
-{
-    if (ab->hw_params->rxdma1_enable)
-        return;
-
-    timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
-}
-
 int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
 {
     struct ath12k *ar;
@@ -1055,8 +1058,6 @@ int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
     if (ret)
         goto out;
 
-    ath12k_dp_mon_reap_timer_init(ab);
-
     /* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
     for (i = 0; i < ab->num_radios; i++) {
         ar = ab->pdevs[i].ar;
@@ -1082,8 +1083,8 @@ out:
 
 int ath12k_dp_htt_connect(struct ath12k_dp *dp)
 {
-    struct ath12k_htc_svc_conn_req conn_req = {0};
-    struct ath12k_htc_svc_conn_resp conn_resp = {0};
+    struct ath12k_htc_svc_conn_req conn_req = {};
+    struct ath12k_htc_svc_conn_resp conn_resp = {};
     int status;
 
     conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
@@ -1107,11 +1108,8 @@ static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
 {
     switch (arvif->ahvif->vdev_type) {
     case WMI_VDEV_TYPE_STA:
-        /* TODO: Verify the search type and flags since ast hash
-         * is not part of peer mapv3
-         */
         arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
-        arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+        arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
         break;
     case WMI_VDEV_TYPE_AP:
     case WMI_VDEV_TYPE_IBSS:
@@ -1165,31 +1163,36 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
     /* RX Descriptor cleanup */
     spin_lock_bh(&dp->rx_desc_lock);
 
-    for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
-        desc_info = dp->rxbaddr[i];
-
-        for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
-            if (!desc_info[j].in_use) {
-                list_del(&desc_info[j].list);
+    if (dp->rxbaddr) {
+        for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES(ab); i++) {
+            if (!dp->rxbaddr[i])
                 continue;
-            }
 
-            skb = desc_info[j].skb;
-            if (!skb)
-                continue;
+            desc_info = dp->rxbaddr[i];
 
-            dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
-                             skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
-            dev_kfree_skb_any(skb);
-        }
-    }
+            for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+                if (!desc_info[j].in_use) {
+                    list_del(&desc_info[j].list);
+                    continue;
+                }
 
-    for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
-        if (!dp->rxbaddr[i])
-            continue;
+                skb = desc_info[j].skb;
+                if (!skb)
+                    continue;
+
+                dma_unmap_single(ab->dev,
+                                 ATH12K_SKB_RXCB(skb)->paddr,
+                                 skb->len + skb_tailroom(skb),
+                                 DMA_FROM_DEVICE);
+                dev_kfree_skb_any(skb);
+            }
 
-        kfree(dp->rxbaddr[i]);
-        dp->rxbaddr[i] = NULL;
+            kfree(dp->rxbaddr[i]);
+            dp->rxbaddr[i] = NULL;
+        }
+
+        kfree(dp->rxbaddr);
+        dp->rxbaddr = NULL;
     }
 
     spin_unlock_bh(&dp->rx_desc_lock);
@@ -1198,19 +1201,27 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
     for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
         spin_lock_bh(&dp->tx_desc_lock[i]);
 
-        list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
-                                 list) {
+        list_for_each_entry_safe(tx_desc_info, tmp1,
+                                 &dp->tx_desc_used_list[i], list) {
             list_del(&tx_desc_info->list);
             skb = tx_desc_info->skb;
             if (!skb)
                 continue;
 
+            skb_cb = ATH12K_SKB_CB(skb);
+            if (skb_cb->paddr_ext_desc) {
+                dma_unmap_single(ab->dev,
+                                 skb_cb->paddr_ext_desc,
+                                 tx_desc_info->skb_ext_desc->len,
+                                 DMA_TO_DEVICE);
+                dev_kfree_skb_any(tx_desc_info->skb_ext_desc);
+            }
+
             /* if we are unregistering, hw would've been destroyed and
              * ar is no longer valid.
              */
             if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
-                skb_cb = ATH12K_SKB_CB(skb);
                 ar = skb_cb->ar;
 
                 if (atomic_dec_and_test(&ar->dp.num_tx_pending))
@@ -1225,19 +1236,25 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
         spin_unlock_bh(&dp->tx_desc_lock[i]);
     }
 
-    for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
-        spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+    if (dp->txbaddr) {
+        for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
+            spin_lock_bh(&dp->tx_desc_lock[pool_id]);
 
-        for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
-            tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
-            if (!dp->txbaddr[tx_spt_page])
-                continue;
+            for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL(ab); i++) {
+                tx_spt_page = i + pool_id *
+                              ATH12K_TX_SPT_PAGES_PER_POOL(ab);
+                if (!dp->txbaddr[tx_spt_page])
+                    continue;
+
+                kfree(dp->txbaddr[tx_spt_page]);
+                dp->txbaddr[tx_spt_page] = NULL;
+            }
 
-            kfree(dp->txbaddr[tx_spt_page]);
-            dp->txbaddr[tx_spt_page] = NULL;
+            spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
         }
 
-        spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+        kfree(dp->txbaddr);
+        dp->txbaddr = NULL;
     }
 
     /* unmap SPT pages */
@@ -1261,22 +1278,24 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
     if (!ab->hw_params->reoq_lut_support)
         return;
 
-    if (dp->reoq_lut.vaddr) {
+    if (dp->reoq_lut.vaddr_unaligned) {
         ath12k_hif_write32(ab,
                            HAL_SEQ_WCSS_UMAC_REO_REG +
                            HAL_REO1_QDESC_LUT_BASE0(ab), 0);
-        dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
-                          dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
-        dp->reoq_lut.vaddr = NULL;
+        dma_free_coherent(ab->dev, dp->reoq_lut.size,
+                          dp->reoq_lut.vaddr_unaligned,
+                          dp->reoq_lut.paddr_unaligned);
+        dp->reoq_lut.vaddr_unaligned = NULL;
     }
 
-    if (dp->ml_reoq_lut.vaddr) {
+    if (dp->ml_reoq_lut.vaddr_unaligned) {
         ath12k_hif_write32(ab,
                            HAL_SEQ_WCSS_UMAC_REO_REG +
                            HAL_REO1_QDESC_LUT_BASE1(ab), 0);
-        dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
-                          dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
-        dp->ml_reoq_lut.vaddr = NULL;
+        dma_free_coherent(ab->dev, dp->ml_reoq_lut.size,
+                          dp->ml_reoq_lut.vaddr_unaligned,
+                          dp->ml_reoq_lut.paddr_unaligned);
+        dp->ml_reoq_lut.vaddr_unaligned = NULL;
     }
 }
@@ -1384,8 +1403,8 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
     ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
     spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
 
-    start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
-    end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
+    start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET(ab);
+    end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES(ab);
 
     if (ppt_idx < start_ppt_idx ||
         ppt_idx >= end_ppt_idx ||
@@ -1409,7 +1428,7 @@ struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
     start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
     end_ppt_idx = start_ppt_idx +
-                  (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);
+                  (ATH12K_TX_SPT_PAGES_PER_POOL(ab) * ATH12K_HW_MAX_QUEUES);
 
     if (ppt_idx < start_ppt_idx ||
         ppt_idx >= end_ppt_idx ||
@@ -1426,13 +1445,24 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
     struct ath12k_dp *dp = &ab->dp;
     struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
     struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
+    u32 num_rx_spt_pages = ATH12K_NUM_RX_SPT_PAGES(ab);
     u32 i, j, pool_id, tx_spt_page;
     u32 ppt_idx, cookie_ppt_idx;
 
     spin_lock_bh(&dp->rx_desc_lock);
 
-    /* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
-    for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+    dp->rxbaddr = kcalloc(num_rx_spt_pages,
+                          sizeof(struct ath12k_rx_desc_info *), GFP_ATOMIC);
+
+    if (!dp->rxbaddr) {
+        spin_unlock_bh(&dp->rx_desc_lock);
+        return -ENOMEM;
+    }
+
+    /* First ATH12K_NUM_RX_SPT_PAGES(ab) of allocated SPT pages are used for
+     * RX
+     */
+    for (i = 0; i < num_rx_spt_pages; i++) {
         rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
                            GFP_ATOMIC);
@@ -1441,7 +1471,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
             return -ENOMEM;
         }
 
-        ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
+        ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET(ab) + i;
         cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
         dp->rxbaddr[i] = &rx_descs[0];
@@ -1459,9 +1489,15 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
 
     spin_unlock_bh(&dp->rx_desc_lock);
 
+    dp->txbaddr = kcalloc(ATH12K_NUM_TX_SPT_PAGES(ab),
+                          sizeof(struct ath12k_tx_desc_info *), GFP_ATOMIC);
+
+    if (!dp->txbaddr)
+        return -ENOMEM;
+
     for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
         spin_lock_bh(&dp->tx_desc_lock[pool_id]);
-        for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
+        for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL(ab); i++) {
             tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
                                GFP_ATOMIC);
@@ -1471,7 +1507,8 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
                 return -ENOMEM;
             }
 
-            tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+            tx_spt_page = i + pool_id *
+                          ATH12K_TX_SPT_PAGES_PER_POOL(ab);
             ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
             dp->txbaddr[tx_spt_page] = &tx_descs[0];
@@ -1505,12 +1542,12 @@ static int ath12k_dp_cmem_init(struct ath12k_base *ab,
     switch (type) {
     case ATH12K_DP_TX_DESC:
         start = ATH12K_TX_SPT_PAGE_OFFSET;
-        end = start + ATH12K_NUM_TX_SPT_PAGES;
+        end = start + ATH12K_NUM_TX_SPT_PAGES(ab);
         break;
     case ATH12K_DP_RX_DESC:
         cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
-        start = ATH12K_RX_SPT_PAGE_OFFSET;
-        end = start + ATH12K_NUM_RX_SPT_PAGES;
+        start = ATH12K_RX_SPT_PAGE_OFFSET(ab);
+        end = start + ATH12K_NUM_RX_SPT_PAGES(ab);
         break;
     default:
         ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
@@ -1538,6 +1575,11 @@ void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
     }
 }
 
+static u32 ath12k_dp_get_num_spt_pages(struct ath12k_base *ab)
+{
+    return ATH12K_NUM_RX_SPT_PAGES(ab) + ATH12K_NUM_TX_SPT_PAGES(ab);
+}
+
 static int ath12k_dp_cc_init(struct ath12k_base *ab)
 {
     struct ath12k_dp *dp = &ab->dp;
@@ -1552,7 +1594,7 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
         spin_lock_init(&dp->tx_desc_lock[i]);
     }
 
-    dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
+    dp->num_spt_pages = ath12k_dp_get_num_spt_pages(ab);
     if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
         dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
@@ -1564,7 +1606,7 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
         return -ENOMEM;
     }
 
-    dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
+    dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES(ab);
 
     for (i = 0; i < dp->num_spt_pages; i++) {
         dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
@@ -1608,39 +1650,67 @@ free:
     return ret;
 }
 
+static int ath12k_dp_alloc_reoq_lut(struct ath12k_base *ab,
+                                    struct ath12k_reo_q_addr_lut *lut)
+{
+    lut->size = DP_REOQ_LUT_SIZE + HAL_REO_QLUT_ADDR_ALIGN - 1;
+    lut->vaddr_unaligned = dma_alloc_coherent(ab->dev, lut->size,
+                                              &lut->paddr_unaligned,
+                                              GFP_KERNEL | __GFP_ZERO);
+    if (!lut->vaddr_unaligned)
+        return -ENOMEM;
+
+    lut->vaddr = PTR_ALIGN(lut->vaddr_unaligned, HAL_REO_QLUT_ADDR_ALIGN);
+    lut->paddr = lut->paddr_unaligned +
+                 ((unsigned long)lut->vaddr - (unsigned long)lut->vaddr_unaligned);
+    return 0;
+}
+
 static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
 {
     struct ath12k_dp *dp = &ab->dp;
+    u32 val;
+    int ret;
 
     if (!ab->hw_params->reoq_lut_support)
         return 0;
 
-    dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
-                                            DP_REOQ_LUT_SIZE,
-                                            &dp->reoq_lut.paddr,
-                                            GFP_KERNEL | __GFP_ZERO);
-    if (!dp->reoq_lut.vaddr) {
+    ret = ath12k_dp_alloc_reoq_lut(ab, &dp->reoq_lut);
+    if (ret) {
         ath12k_warn(ab, "failed to allocate memory for reoq table");
-        return -ENOMEM;
+        return ret;
     }
 
-    dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
-                                               DP_REOQ_LUT_SIZE,
-                                               &dp->ml_reoq_lut.paddr,
-                                               GFP_KERNEL | __GFP_ZERO);
-    if (!dp->ml_reoq_lut.vaddr) {
+    ret = ath12k_dp_alloc_reoq_lut(ab, &dp->ml_reoq_lut);
+    if (ret) {
         ath12k_warn(ab, "failed to allocate memory for ML reoq table");
-        dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
-                          dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
-        dp->reoq_lut.vaddr = NULL;
-        return -ENOMEM;
+        dma_free_coherent(ab->dev, dp->reoq_lut.size,
+                          dp->reoq_lut.vaddr_unaligned,
+                          dp->reoq_lut.paddr_unaligned);
+        dp->reoq_lut.vaddr_unaligned = NULL;
+        return ret;
     }
 
+    /* Bits in the register have address [39:8] LUT base address to be
+     * allocated such that LSBs are assumed to be zero. Also, current
+     * design supports paddr up to 4 GB max hence it fits in 32 bit
+     * register only
+     */
+
     ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
-                       dp->reoq_lut.paddr);
+                       dp->reoq_lut.paddr >> 8);
+
     ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
                        dp->ml_reoq_lut.paddr >> 8);
 
+    val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(ab));
+
+    ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(ab),
+                       val | HAL_REO_QDESC_ADDR_READ_LUT_ENABLE);
+
+    ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_MAX_PEERID(ab),
+                       HAL_REO_QDESC_MAX_PEERID);
+
     return 0;
 }
@@ -1711,7 +1781,8 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
     if (ret)
         goto fail_dp_bank_profiles_cleanup;
 
-    size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;
+    size = sizeof(struct hal_wbm_release_ring_tx) *
+           DP_TX_COMP_RING_SIZE(ab);
 
     ret = ath12k_dp_reoq_lut_setup(ab);
     if (ret) {
@@ -1723,7 +1794,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
         dp->tx_ring[i].tcl_data_ring_id = i;
         dp->tx_ring[i].tx_status_head = 0;
-        dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+        dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE(ab) - 1;
         dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
         if (!dp->tx_ring[i].tx_status) {
             ret = -ENOMEM;
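Note on the new ath12k_dp_alloc_reoq_lut() helper above: it over-allocates the REO queue LUT by HAL_REO_QLUT_ADDR_ALIGN - 1 bytes, rounds the CPU address up with PTR_ALIGN(), carries the same offset over to the DMA address, and the setup code then programs bits [39:8] of that aligned address into the HAL_REO1_QDESC_LUT_BASE0/1 registers. The sketch below reproduces the same over-allocate-and-align arithmetic in plain user-space C; malloc/calloc stands in for dma_alloc_coherent and the LUT_SIZE/LUT_ALIGN values are made up for illustration, so this is only an analogy to the driver code, not part of it.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in values; the real driver uses DP_REOQ_LUT_SIZE and
 * HAL_REO_QLUT_ADDR_ALIGN from the ath12k headers.
 */
#define LUT_SIZE   (8 * 1024)
#define LUT_ALIGN  256           /* must be a power of two */

struct reoq_lut {
    void *vaddr_unaligned;       /* as returned by the allocator */
    void *vaddr;                 /* aligned CPU address */
    uint64_t paddr_unaligned;    /* bus address of the raw buffer */
    uint64_t paddr;              /* aligned bus address */
    size_t size;                 /* raw allocation size */
};

/* Round a pointer up to the next 'align' boundary (align is a power of two),
 * mirroring what the kernel's PTR_ALIGN() does.
 */
static void *ptr_align(void *p, uintptr_t align)
{
    return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

static int alloc_reoq_lut(struct reoq_lut *lut)
{
    /* Over-allocate so an aligned address always fits inside the buffer. */
    lut->size = LUT_SIZE + LUT_ALIGN - 1;
    lut->vaddr_unaligned = calloc(1, lut->size);
    if (!lut->vaddr_unaligned)
        return -1;

    /* Pretend the bus address equals the CPU address for this sketch. */
    lut->paddr_unaligned = (uintptr_t)lut->vaddr_unaligned;

    lut->vaddr = ptr_align(lut->vaddr_unaligned, LUT_ALIGN);
    /* Apply the same offset to the bus address so both stay in sync. */
    lut->paddr = lut->paddr_unaligned +
                 ((uintptr_t)lut->vaddr - (uintptr_t)lut->vaddr_unaligned);
    return 0;
}

int main(void)
{
    struct reoq_lut lut;

    if (alloc_reoq_lut(&lut))
        return 1;

    /* The register takes address bits [39:8]; with a 256-byte-aligned
     * buffer the low 8 bits are zero, so they can simply be shifted out.
     */
    printf("aligned paddr: %#llx, register value: %#llx\n",
           (unsigned long long)lut.paddr,
           (unsigned long long)(lut.paddr >> 8));

    free(lut.vaddr_unaligned);
    return 0;
}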