Diffstat (limited to 'drivers/net/wireless/ath/ath12k/dp.c')
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.c | 137
1 file changed, 86 insertions(+), 51 deletions(-)
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 6317c6d4c043..f893fce6d9bd 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -101,7 +101,7 @@ peer_clean:
return -ENOENT;
}
- for (; tid >= 0; tid--)
+ for (tid--; tid >= 0; tid--)
ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
spin_unlock_bh(&ab->base_lock);
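This first hunk fixes an off-by-one in the error unwind: when setting up TID `tid` fails, that TID was never initialized, so cleanup must start at `tid - 1`. The old loop also tore down the TID whose setup had just failed. A minimal userspace sketch of the idiom, with illustrative names (not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_ITEMS 8

    static bool setup_item(int i) { return i < 5; }	/* pretend item 5 fails */
    static void teardown_item(int i) { printf("teardown %d\n", i); }

    int main(void)
    {
    	int i;

    	for (i = 0; i < NUM_ITEMS; i++) {
    		if (!setup_item(i))
    			goto unwind;
    	}
    	return 0;

    unwind:
    	/* i is the item that failed; decrement before tearing down */
    	for (i--; i >= 0; i--)
    		teardown_item(i);
    	return 1;
    }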
@@ -241,7 +241,7 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
enum hal_ring_type type, int ring_num,
int mac_id, int num_entries)
{
- struct hal_srng_params params = { 0 };
+ struct hal_srng_params params = {};
int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
int ret;
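`{ 0 }` and `{}` both zero-initialize the entire structure; the empty initializer is the spelling current kernel style prefers (standard C since C23, a GNU extension before that). The same cleanup is applied to `conn_req`/`conn_resp` in the `ath12k_dp_htt_connect` hunk below. A stand-in struct (not the real `hal_srng_params`) to illustrate:

    struct params { int ring_id; void *base; };	/* stand-in, illustrative */

    struct params a = { 0 };	/* zeroes first member explicitly, the rest implicitly */
    struct params b = {};	/* empty initializer: identical result */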
@@ -520,7 +520,7 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
- DP_TX_COMP_RING_SIZE);
+ DP_TX_COMP_RING_SIZE(ab));
if (ret) {
ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
tx_comp_ring_num, ret);
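`DP_TX_COMP_RING_SIZE` changes from a plain constant to a function-like macro taking `ab`, so the completion-ring size can differ per device or memory profile. The real definition lives in dp.h and may differ; a hypothetical shape, purely to show the pattern:

    /* Hypothetical sketch only; the field names here are invented. */
    #define DP_TX_COMP_RING_SIZE(ab) \
    	((ab)->hw_params->dp_params.tx_comp_ring_size)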
@@ -1083,8 +1083,8 @@ out:
int ath12k_dp_htt_connect(struct ath12k_dp *dp)
{
- struct ath12k_htc_svc_conn_req conn_req = {0};
- struct ath12k_htc_svc_conn_resp conn_resp = {0};
+ struct ath12k_htc_svc_conn_req conn_req = {};
+ struct ath12k_htc_svc_conn_resp conn_resp = {};
int status;
conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
@@ -1163,31 +1163,36 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
/* RX Descriptor cleanup */
spin_lock_bh(&dp->rx_desc_lock);
- for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
- desc_info = dp->rxbaddr[i];
-
- for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- if (!desc_info[j].in_use) {
- list_del(&desc_info[j].list);
+ if (dp->rxbaddr) {
+ for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES(ab); i++) {
+ if (!dp->rxbaddr[i])
continue;
- }
- skb = desc_info[j].skb;
- if (!skb)
- continue;
+ desc_info = dp->rxbaddr[i];
- dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
- skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- }
- }
+ for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+ if (!desc_info[j].in_use) {
+ list_del(&desc_info[j].list);
+ continue;
+ }
- for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
- if (!dp->rxbaddr[i])
- continue;
+ skb = desc_info[j].skb;
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev,
+ ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ kfree(dp->rxbaddr[i]);
+ dp->rxbaddr[i] = NULL;
+ }
- kfree(dp->rxbaddr[i]);
- dp->rxbaddr[i] = NULL;
+ kfree(dp->rxbaddr);
+ dp->rxbaddr = NULL;
}
spin_unlock_bh(&dp->rx_desc_lock);
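`dp->rxbaddr` becomes a heap-allocated page-pointer table (see the `kcalloc` in `ath12k_dp_cc_desc_init` further down), so cleanup now guards against a NULL table, merges the skb-unmap pass and the page-free pass into one loop, and finally frees the table itself. A userspace model of the two-level cleanup, with illustrative names:

    #include <stdlib.h>

    struct desc { int in_use; };

    /* Free each allocated page, then the page-pointer table itself. */
    static void cleanup_table(struct desc ***table_p, size_t num_pages)
    {
    	struct desc **table = *table_p;
    	size_t i;

    	if (!table)
    		return;			/* table was never allocated */

    	for (i = 0; i < num_pages; i++) {
    		free(table[i]);		/* free(NULL) is a no-op */
    		table[i] = NULL;
    	}

    	free(table);
    	*table_p = NULL;		/* mirrors dp->rxbaddr = NULL */
    }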
@@ -1196,8 +1201,8 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
spin_lock_bh(&dp->tx_desc_lock[i]);
- list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
- list) {
+ list_for_each_entry_safe(tx_desc_info, tmp1,
+ &dp->tx_desc_used_list[i], list) {
list_del(&tx_desc_info->list);
skb = tx_desc_info->skb;
@@ -1231,19 +1236,25 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
spin_unlock_bh(&dp->tx_desc_lock[i]);
}
- for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
- spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ if (dp->txbaddr) {
+ for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
+ spin_lock_bh(&dp->tx_desc_lock[pool_id]);
- for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
- tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
- if (!dp->txbaddr[tx_spt_page])
- continue;
+ for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL(ab); i++) {
+ tx_spt_page = i + pool_id *
+ ATH12K_TX_SPT_PAGES_PER_POOL(ab);
+ if (!dp->txbaddr[tx_spt_page])
+ continue;
+
+ kfree(dp->txbaddr[tx_spt_page]);
+ dp->txbaddr[tx_spt_page] = NULL;
+ }
- kfree(dp->txbaddr[tx_spt_page]);
- dp->txbaddr[tx_spt_page] = NULL;
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}
- spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+ kfree(dp->txbaddr);
+ dp->txbaddr = NULL;
}
/* unmap SPT pages */
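The TX side gets the same NULL-guarded treatment, and the global page index is derived from a pool-local one. A tiny helper showing the arithmetic the driver open-codes (names illustrative):

    /* pool 0 owns pages [0, P), pool 1 owns [P, 2P), and so on,
     * where P = ATH12K_TX_SPT_PAGES_PER_POOL(ab).
     */
    static unsigned int tx_page_index(unsigned int pool_id, unsigned int i,
    				      unsigned int pages_per_pool)
    {
    	return i + pool_id * pages_per_pool;
    }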
@@ -1392,8 +1403,8 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
- start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
- end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
+ start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET(ab);
+ end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES(ab);
if (ppt_idx < start_ppt_idx ||
ppt_idx >= end_ppt_idx ||
@@ -1417,7 +1428,7 @@ struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
end_ppt_idx = start_ppt_idx +
- (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);
+ (ATH12K_TX_SPT_PAGES_PER_POOL(ab) * ATH12K_HW_MAX_QUEUES);
if (ppt_idx < start_ppt_idx ||
ppt_idx >= end_ppt_idx ||
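Both lookup paths split the cookie into a page-table (PPT) index and an in-page (SPT) index, then bound-check the PPT index against the device's own window. A userspace model of the decode; the mask values below are illustrative, the real `ATH12K_DP_CC_COOKIE_*` masks live in dp.h:

    #include <stdint.h>

    #define COOKIE_SPT_MASK  0x000003ffu	/* illustrative: low 10 bits   */
    #define COOKIE_PPT_SHIFT 10
    #define COOKIE_PPT_MASK  0xfffffc00u	/* illustrative: remaining bits */

    static uint32_t cookie_ppt(uint32_t cookie)
    {
    	return (cookie & COOKIE_PPT_MASK) >> COOKIE_PPT_SHIFT;
    }

    static uint32_t cookie_spt(uint32_t cookie)
    {
    	return cookie & COOKIE_SPT_MASK;
    }

    /* Valid only if start <= ppt < end, where the [start, end) window is
     * computed from the device's page offsets, as in the hunks above.
     */
    static int cookie_in_range(uint32_t ppt, uint32_t start, uint32_t end)
    {
    	return ppt >= start && ppt < end;
    }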
@@ -1434,13 +1445,24 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
+ u32 num_rx_spt_pages = ATH12K_NUM_RX_SPT_PAGES(ab);
u32 i, j, pool_id, tx_spt_page;
u32 ppt_idx, cookie_ppt_idx;
spin_lock_bh(&dp->rx_desc_lock);
- /* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
- for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+ dp->rxbaddr = kcalloc(num_rx_spt_pages,
+ sizeof(struct ath12k_rx_desc_info *), GFP_ATOMIC);
+
+ if (!dp->rxbaddr) {
+ spin_unlock_bh(&dp->rx_desc_lock);
+ return -ENOMEM;
+ }
+
+ /* First ATH12K_NUM_RX_SPT_PAGES(ab) of allocated SPT pages are used for
+ * RX
+ */
+ for (i = 0; i < num_rx_spt_pages; i++) {
rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
GFP_ATOMIC);
@@ -1449,7 +1471,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
return -ENOMEM;
}
- ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
+ ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET(ab) + i;
cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
dp->rxbaddr[i] = &rx_descs[0];
@@ -1467,9 +1489,15 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
spin_unlock_bh(&dp->rx_desc_lock);
+ dp->txbaddr = kcalloc(ATH12K_NUM_TX_SPT_PAGES(ab),
+ sizeof(struct ath12k_tx_desc_info *), GFP_ATOMIC);
+
+ if (!dp->txbaddr)
+ return -ENOMEM;
+
for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
- for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
+ for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL(ab); i++) {
tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
GFP_ATOMIC);
@@ -1479,7 +1507,8 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
return -ENOMEM;
}
- tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+ tx_spt_page = i + pool_id *
+ ATH12K_TX_SPT_PAGES_PER_POOL(ab);
ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
dp->txbaddr[tx_spt_page] = &tx_descs[0];
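Both descriptor-init loops allocate while holding a spinlock, which is why the allocations use `GFP_ATOMIC` and why the new early-return paths drop the lock first. A userspace model of the constraint, with stubs standing in for the kernel primitives:

    #include <stdlib.h>

    static void lock(void)   { /* stands in for spin_lock_bh()   */ }
    static void unlock(void) { /* stands in for spin_unlock_bh() */ }

    static int init_pages(void **table, size_t num_pages, size_t page_bytes)
    {
    	size_t i;

    	lock();
    	for (i = 0; i < num_pages; i++) {
    		/* kcalloc(..., GFP_ATOMIC) in the driver: must not sleep here */
    		table[i] = calloc(1, page_bytes);
    		if (!table[i]) {
    			unlock();	/* never return with the lock held */
    			return -1;	/* -ENOMEM in the driver */
    		}
    	}
    	unlock();
    	return 0;
    }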
@@ -1513,12 +1542,12 @@ static int ath12k_dp_cmem_init(struct ath12k_base *ab,
switch (type) {
case ATH12K_DP_TX_DESC:
start = ATH12K_TX_SPT_PAGE_OFFSET;
- end = start + ATH12K_NUM_TX_SPT_PAGES;
+ end = start + ATH12K_NUM_TX_SPT_PAGES(ab);
break;
case ATH12K_DP_RX_DESC:
cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
- start = ATH12K_RX_SPT_PAGE_OFFSET;
- end = start + ATH12K_NUM_RX_SPT_PAGES;
+ start = ATH12K_RX_SPT_PAGE_OFFSET(ab);
+ end = start + ATH12K_NUM_RX_SPT_PAGES(ab);
break;
default:
ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
@@ -1546,6 +1575,11 @@ void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
}
}
+static u32 ath12k_dp_get_num_spt_pages(struct ath12k_base *ab)
+{
+ return ATH12K_NUM_RX_SPT_PAGES(ab) + ATH12K_NUM_TX_SPT_PAGES(ab);
+}
+
static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
@@ -1560,7 +1594,7 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
spin_lock_init(&dp->tx_desc_lock[i]);
}
- dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
+ dp->num_spt_pages = ath12k_dp_get_num_spt_pages(ab);
if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
@@ -1572,7 +1606,7 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
return -ENOMEM;
}
- dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
+ dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES(ab);
for (i = 0; i < dp->num_spt_pages; i++) {
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
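With the page counts now per-device, each device carves out its own window of RX PPT entries via `rx_ppt_base = device_id * ATH12K_NUM_RX_SPT_PAGES(ab)`, keeping cookies unambiguous across devices in a group. A worked example with an illustrative page count:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int num_rx_pages = 52;	/* illustrative value only */
    	unsigned int device_id;

    	/* device 0 -> base 0, device 1 -> base 52, device 2 -> base 104 */
    	for (device_id = 0; device_id < 3; device_id++)
    		printf("device %u: rx_ppt_base = %u\n",
    		       device_id, device_id * num_rx_pages);
    	return 0;
    }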
@@ -1747,7 +1781,8 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
if (ret)
goto fail_dp_bank_profiles_cleanup;
- size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;
+ size = sizeof(struct hal_wbm_release_ring_tx) *
+ DP_TX_COMP_RING_SIZE(ab);
ret = ath12k_dp_reoq_lut_setup(ab);
if (ret) {
@@ -1759,7 +1794,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
dp->tx_ring[i].tcl_data_ring_id = i;
dp->tx_ring[i].tx_status_head = 0;
- dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+ dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE(ab) - 1;
dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
if (!dp->tx_ring[i].tx_status) {
ret = -ENOMEM;
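Finally, the status bookkeeping is sized from the same per-device macro: head starts at 0 and tail at `DP_TX_COMP_RING_SIZE(ab) - 1`, a common circular-buffer convention in which the tail sits one slot behind the head when the ring is empty. A minimal sketch of that convention (an assumption about intent, not driver code):

    struct status_ring {
    	unsigned int head;	/* next slot to fill  */
    	unsigned int tail;	/* last slot consumed */
    	unsigned int size;
    };

    static void ring_init(struct status_ring *r, unsigned int size)
    {
    	r->head = 0;
    	r->tail = size - 1;	/* tail trails head by one: ring empty */
    	r->size = size;
    }

    static int ring_empty(const struct status_ring *r)
    {
    	return (r->tail + 1) % r->size == r->head;
    }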