Diffstat (limited to 'drivers/net/wireless/ath/ath12k/qmi.c')
-rw-r--r-- | drivers/net/wireless/ath/ath12k/qmi.c | 773
1 file changed, 670 insertions, 103 deletions
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 92845ffff44a..99e1fb2910d0 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */

 #include <linux/elf.h>
@@ -11,6 +11,8 @@
 #include "debug.h"
 #include <linux/of.h>
 #include <linux/firmware.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>

 #define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
 #define HOST_CSTATE_BIT 0x04
@@ -583,6 +585,24 @@ static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = {
			   board_id),
	},
	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x13,
+		.offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+				   single_chip_mlo_support_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x13,
+		.offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+				   single_chip_mlo_support),
+	},
+	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -1998,49 +2018,145 @@ static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
	},
 };

-static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
-				      struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+static void ath12k_host_cap_hw_link_id_init(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab, *partner_ab;
+	int i, j, hw_id_base;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		hw_id_base = 0;
+		ab = ag->ab[i];
+
+		for (j = 0; j < ag->num_devices; j++) {
+			partner_ab = ag->ab[j];
+
+			if (partner_ab->wsi_info.index >= ab->wsi_info.index)
+				continue;
+
+			hw_id_base += partner_ab->qmi.num_radios;
+		}
+
+		ab->wsi_info.hw_link_id_base = hw_id_base;
+	}
+
+	ag->hw_link_id_init_done = true;
+}
+
+static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
+				     struct qmi_wlanfw_host_cap_req_msg_v01 *req)
 {
	struct wlfw_host_mlo_chip_info_s_v01 *info;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_base *partner_ab;
	u8 hw_link_id = 0;
-	int i;
+	int i, j, ret;
+
+	if (!ag->mlo_capable) {
+		ath12k_dbg(ab, ATH12K_DBG_QMI,
+			   "MLO is disabled hence skip QMI MLO cap");
+		return 0;
+	}

	if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
+		ag->mlo_capable = false;
		ath12k_dbg(ab, ATH12K_DBG_QMI,
			   "skip QMI MLO cap due to invalid num_radio %d\n",
			   ab->qmi.num_radios);
-		return;
+		return 0;
+	}
+
+	if (ab->device_id == ATH12K_INVALID_DEVICE_ID) {
+		ath12k_err(ab, "failed to send MLO cap due to invalid device id\n");
+		return -EINVAL;
	}

	req->mlo_capable_valid = 1;
	req->mlo_capable = 1;
	req->mlo_chip_id_valid = 1;
-	req->mlo_chip_id = 0;
+	req->mlo_chip_id = ab->device_id;
	req->mlo_group_id_valid = 1;
-	req->mlo_group_id = 0;
+	req->mlo_group_id = ag->id;
	req->max_mlo_peer_valid = 1;
	/* Max peer number generally won't change for the same device
	 * but needs to be synced with host driver.
	 */
	req->max_mlo_peer = ab->hw_params->max_mlo_peer;
	req->mlo_num_chips_valid = 1;
-	req->mlo_num_chips = 1;
+	req->mlo_num_chips = ag->num_devices;
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo capability advertisement device_id %d group_id %d num_devices %d",
+		   req->mlo_chip_id, req->mlo_group_id, req->mlo_num_chips);

-	info = &req->mlo_chip_info[0];
-	info->chip_id = 0;
-	info->num_local_links = ab->qmi.num_radios;
+	mutex_lock(&ag->mutex);

-	for (i = 0; i < info->num_local_links; i++) {
-		info->hw_link_id[i] = hw_link_id;
-		info->valid_mlo_link_id[i] = 1;
+	if (!ag->hw_link_id_init_done)
+		ath12k_host_cap_hw_link_id_init(ag);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		info = &req->mlo_chip_info[i];
+		partner_ab = ag->ab[i];
+
+		if (partner_ab->device_id == ATH12K_INVALID_DEVICE_ID) {
+			ath12k_err(ab, "failed to send MLO cap due to invalid partner device id\n");
+			ret = -EINVAL;
+			goto device_cleanup;
+		}

-		hw_link_id++;
+		info->chip_id = partner_ab->device_id;
+		info->num_local_links = partner_ab->qmi.num_radios;
+
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo device id %d num_link %d\n",
+			   info->chip_id, info->num_local_links);
+
+		for (j = 0; j < info->num_local_links; j++) {
+			info->hw_link_id[j] = partner_ab->wsi_info.hw_link_id_base + j;
+			info->valid_mlo_link_id[j] = 1;
+
+			ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo hw_link_id %d\n",
+				   info->hw_link_id[j]);
+
+			hw_link_id++;
+		}
	}

+	if (hw_link_id <= 0)
+		ag->mlo_capable = false;
+
	req->mlo_chip_info_valid = 1;
+
+	mutex_unlock(&ag->mutex);
+
+	return 0;
+
+device_cleanup:
+	for (i = i - 1; i >= 0; i--) {
+		info = &req->mlo_chip_info[i];
+
+		memset(info, 0, sizeof(*info));
+	}
+
+	req->mlo_num_chips = 0;
+	req->mlo_num_chips_valid = 0;
+
+	req->max_mlo_peer = 0;
+	req->max_mlo_peer_valid = 0;
+	req->mlo_group_id = 0;
+	req->mlo_group_id_valid = 0;
+	req->mlo_chip_id = 0;
+	req->mlo_chip_id_valid = 0;
+	req->mlo_capable = 0;
+	req->mlo_capable_valid = 0;
+
+	ag->mlo_capable = false;
+
+	mutex_unlock(&ag->mutex);
+
+	return ret;
 }

-static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
 {
	struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
	struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
@@ -2054,10 +2170,12 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
	req.bdf_support_valid = 1;
	req.bdf_support = 1;

-	req.m3_support_valid = 1;
-	req.m3_support = 1;
-	req.m3_cache_support_valid = 1;
-	req.m3_cache_support = 1;
+	if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_driver) {
+		req.m3_support_valid = 1;
+		req.m3_support = 1;
+		req.m3_cache_support_valid = 1;
+		req.m3_cache_support = 1;
+	}

	req.cal_done_valid = 1;
	req.cal_done = ab->qmi.cal_done;
@@ -2085,7 +2203,9 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
		req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
	}

-	ath12k_host_cap_parse_mlo(ab, &req);
+	ret = ath12k_host_cap_parse_mlo(ab, &req);
+	if (ret < 0)
+		goto out;

	ret = qmi_txn_init(&ab->qmi.handle, &txn,
			   qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
@@ -2124,9 +2244,6 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
	struct qmi_txn txn;
	int ret;

-	if (!ab->slo_capable)
-		goto out;
-
	ret = qmi_txn_init(&ab->qmi.handle, &txn,
			   qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp);
	if (ret < 0)
@@ -2151,6 +2268,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
		goto out;
	}

+	if (resp.single_chip_mlo_support_valid && resp.single_chip_mlo_support)
+		ab->single_chip_mlo_support = true;
+
	if (!resp.num_phy_valid) {
		ret = -ENODATA;
		goto out;
@@ -2158,7 +2278,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)

	ab->qmi.num_radios = resp.num_phy;

-	ath12k_dbg(ab, ATH12K_DBG_QMI, "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
+	ath12k_dbg(ab, ATH12K_DBG_QMI,
+		   "phy capability resp valid %d single_chip_mlo_support %d valid %d num_phy %d valid %d board_id %d\n",
+		   resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support,
		   resp.num_phy_valid, resp.num_phy,
		   resp.board_id_valid, resp.board_id);

@@ -2243,7 +2365,9 @@ resp_out:
	return ret;
 }

-static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
 {
	struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
	struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
@@ -2260,7 +2384,8 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
	 * failure to firmware and firmware then request multiple blocks of
	 * small chunk size memory.
	 */
-	if (ab->qmi.target_mem_delayed) {
+	if (!test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags) &&
+	    ab->qmi.target_mem_delayed) {
		delayed = true;
		ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
			   ab->qmi.mem_seg_count);
@@ -2318,29 +2443,161 @@ out:
	return ret;
 }

-static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
+void ath12k_qmi_reset_mlo_mem(struct ath12k_hw_group *ag)
 {
+	struct target_mem_chunk *mlo_chunk;
	int i;

-	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
-		if (!ab->qmi.target_mem[i].v.addr)
-			continue;
+	lockdep_assert_held(&ag->mutex);
+
+	if (!ag->mlo_mem.init_done || ag->num_started)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(ag->mlo_mem.chunk); i++) {
+		mlo_chunk = &ag->mlo_mem.chunk[i];
+
+		if (mlo_chunk->v.addr)
+			/* TODO: Mode 0 recovery is the default mode hence resetting the
+			 * whole memory region for now. Once Mode 1 support is added, this
+			 * needs to be handled properly
+			 */
+			memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
+	}
+}
+
+static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
+					  struct target_mem_chunk *chunk,
+					  int idx)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct target_mem_chunk *mlo_chunk;
+	bool fixed_mem;
+
+	lockdep_assert_held(&ag->mutex);
+
+	if (!ag->mlo_mem.init_done || ag->num_started)
+		return;
+
+	if (idx >= ARRAY_SIZE(ag->mlo_mem.chunk)) {
+		ath12k_warn(ab, "invalid index for MLO memory chunk free: %d\n", idx);
+		return;
+	}
+
+	fixed_mem = test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);
+	mlo_chunk = &ag->mlo_mem.chunk[idx];
+
+	if (fixed_mem && mlo_chunk->v.ioaddr) {
+		iounmap(mlo_chunk->v.ioaddr);
+		mlo_chunk->v.ioaddr = NULL;
+	} else if (mlo_chunk->v.addr) {
		dma_free_coherent(ab->dev,
-				  ab->qmi.target_mem[i].size,
-				  ab->qmi.target_mem[i].v.addr,
-				  ab->qmi.target_mem[i].paddr);
-		ab->qmi.target_mem[i].v.addr = NULL;
+				  mlo_chunk->size,
+				  mlo_chunk->v.addr,
+				  mlo_chunk->paddr);
+		mlo_chunk->v.addr = NULL;
	}
+
+	mlo_chunk->paddr = 0;
+	mlo_chunk->size = 0;
+	if (fixed_mem)
+		chunk->v.ioaddr = NULL;
+	else
+		chunk->v.addr = NULL;
+	chunk->paddr = 0;
+	chunk->size = 0;
+}
+
+static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	int i, mlo_idx;
+
+	for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
+		if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
+			ath12k_qmi_free_mlo_mem_chunk(ab,
+						      &ab->qmi.target_mem[i],
+						      mlo_idx++);
+		} else {
+			if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags) &&
+			    ab->qmi.target_mem[i].v.ioaddr) {
+				iounmap(ab->qmi.target_mem[i].v.ioaddr);
+				ab->qmi.target_mem[i].v.ioaddr = NULL;
+			} else {
+				if (!ab->qmi.target_mem[i].v.addr)
+					continue;
+				dma_free_coherent(ab->dev,
+						  ab->qmi.target_mem[i].prev_size,
+						  ab->qmi.target_mem[i].v.addr,
+						  ab->qmi.target_mem[i].paddr);
+				ab->qmi.target_mem[i].v.addr = NULL;
+			}
+		}
+	}
+
+	if (!ag->num_started && ag->mlo_mem.init_done) {
+		ag->mlo_mem.init_done = false;
+		ag->mlo_mem.mlo_mem_size = 0;
+	}
+}
+
+static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab,
+				  struct target_mem_chunk *chunk)
+{
+	/* Firmware reloads in recovery/resume.
+	 * In such cases, no need to allocate memory for FW again.
+	 */
+	if (chunk->v.addr) {
+		if (chunk->prev_type == chunk->type &&
+		    chunk->prev_size == chunk->size)
+			goto this_chunk_done;
+
+		/* cannot reuse the existing chunk */
+		dma_free_coherent(ab->dev, chunk->prev_size,
+				  chunk->v.addr, chunk->paddr);
+		chunk->v.addr = NULL;
+	}
+
+	chunk->v.addr = dma_alloc_coherent(ab->dev,
+					   chunk->size,
+					   &chunk->paddr,
+					   GFP_KERNEL | __GFP_NOWARN);
+	if (!chunk->v.addr) {
+		if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
+			ab->qmi.target_mem_delayed = true;
+			ath12k_warn(ab,
+				    "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+				    chunk->size,
+				    chunk->type);
+			ath12k_qmi_free_target_mem_chunk(ab);
+			return -EAGAIN;
+		}
+		ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
+			    chunk->type, chunk->size);
+		return -ENOMEM;
+	}
+	chunk->prev_type = chunk->type;
+	chunk->prev_size = chunk->size;
+this_chunk_done:
+	return 0;
 }

 static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
 {
-	int i;
-	struct target_mem_chunk *chunk;
+	struct target_mem_chunk *chunk, *mlo_chunk;
+	struct ath12k_hw_group *ag = ab->ag;
+	int i, mlo_idx, ret;
+	int mlo_size = 0;
+
+	mutex_lock(&ag->mutex);
+
+	if (!ag->mlo_mem.init_done) {
+		memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk));
+		ag->mlo_mem.init_done = true;
+	}

	ab->qmi.target_mem_delayed = false;

-	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+	for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
		chunk = &ab->qmi.target_mem[i];

		/* Allocate memory for the region and the functionality supported
@@ -2352,24 +2609,41 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
		case M3_DUMP_REGION_TYPE:
		case PAGEABLE_MEM_REGION_TYPE:
		case CALDB_MEM_REGION_TYPE:
-			chunk->v.addr = dma_alloc_coherent(ab->dev,
-							   chunk->size,
-							   &chunk->paddr,
-							   GFP_KERNEL | __GFP_NOWARN);
-			if (!chunk->v.addr) {
-				if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
-					ab->qmi.target_mem_delayed = true;
-					ath12k_warn(ab,
-						    "qmi dma allocation failed (%d B type %u), will try later with small size\n",
-						    chunk->size,
-						    chunk->type);
-					ath12k_qmi_free_target_mem_chunk(ab);
-					return 0;
+			ret = ath12k_qmi_alloc_chunk(ab, chunk);
+			if (ret)
+				goto err;
+			break;
+		case MLO_GLOBAL_MEM_REGION_TYPE:
+			mlo_size += chunk->size;
+			if (ag->mlo_mem.mlo_mem_size &&
+			    mlo_size > ag->mlo_mem.mlo_mem_size) {
+				ath12k_err(ab, "QMI MLO memory allocation failure, requested size %d is more than allocated size %d",
+					   mlo_size, ag->mlo_mem.mlo_mem_size);
+				ret = -EINVAL;
+				goto err;
+			}
+
+			mlo_chunk = &ag->mlo_mem.chunk[mlo_idx];
+			if (mlo_chunk->paddr) {
+				if (chunk->size != mlo_chunk->size) {
+					ath12k_err(ab, "QMI MLO chunk memory allocation failure for index %d, requested size %d is more than allocated size %d",
+						   mlo_idx, chunk->size, mlo_chunk->size);
+					ret = -EINVAL;
+					goto err;
				}
-				ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
-					    chunk->type, chunk->size);
-				return -ENOMEM;
+			} else {
+				mlo_chunk->size = chunk->size;
+				mlo_chunk->type = chunk->type;
+				ret = ath12k_qmi_alloc_chunk(ab, mlo_chunk);
+				if (ret)
+					goto err;
+				memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
			}
+
+			chunk->paddr = mlo_chunk->paddr;
+			chunk->v.addr = mlo_chunk->v.addr;
+			mlo_idx++;
+			break;
		default:
			ath12k_warn(ab, "memory type %u not supported\n",
@@ -2379,10 +2653,163 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
			break;
		}
	}
+
+	if (!ag->mlo_mem.mlo_mem_size) {
+		ag->mlo_mem.mlo_mem_size = mlo_size;
+	} else if (ag->mlo_mem.mlo_mem_size != mlo_size) {
+		ath12k_err(ab, "QMI MLO memory size error, expected size is %d but requested size is %d",
+			   ag->mlo_mem.mlo_mem_size, mlo_size);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mutex_unlock(&ag->mutex);

	return 0;
+
+err:
+	ath12k_qmi_free_target_mem_chunk(ab);
+
+	mutex_unlock(&ag->mutex);
+
+	/* The firmware will attempt to request memory in smaller chunks
+	 * on the next try. However, the current caller should be notified
+	 * that this instance of request parsing was successful.
+	 * Therefore, return 0 only.
+	 */
+	if (ret == -EAGAIN)
+		ret = 0;
+
+	return ret;
+}
+
+static int ath12k_qmi_assign_target_mem_chunk(struct ath12k_base *ab)
+{
+	struct reserved_mem *rmem;
+	size_t avail_rmem_size;
+	int i, idx, ret;
+
+	for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
+		switch (ab->qmi.target_mem[i].type) {
+		case HOST_DDR_REGION_TYPE:
+			rmem = ath12k_core_get_reserved_mem(ab, 0);
+			if (!rmem) {
+				ret = -ENODEV;
+				goto out;
+			}
+
+			avail_rmem_size = rmem->size;
+			if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+				ath12k_dbg(ab, ATH12K_DBG_QMI,
+					   "failed to assign mem type %u req size %u avail size %zu\n",
+					   ab->qmi.target_mem[i].type,
+					   ab->qmi.target_mem[i].size,
+					   avail_rmem_size);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			ab->qmi.target_mem[idx].paddr = rmem->base;
+			ab->qmi.target_mem[idx].v.ioaddr =
+				ioremap(ab->qmi.target_mem[idx].paddr,
+					ab->qmi.target_mem[i].size);
+			if (!ab->qmi.target_mem[idx].v.ioaddr) {
+				ret = -EIO;
+				goto out;
+			}
+			ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+			ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+			idx++;
+			break;
+		case BDF_MEM_REGION_TYPE:
+			rmem = ath12k_core_get_reserved_mem(ab, 0);
+			if (!rmem) {
+				ret = -ENODEV;
+				goto out;
+			}
+
+			avail_rmem_size = rmem->size - ab->hw_params->bdf_addr_offset;
+			if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+				ath12k_dbg(ab, ATH12K_DBG_QMI,
+					   "failed to assign mem type %u req size %u avail size %zu\n",
+					   ab->qmi.target_mem[i].type,
+					   ab->qmi.target_mem[i].size,
+					   avail_rmem_size);
+				ret = -EINVAL;
+				goto out;
+			}
+			ab->qmi.target_mem[idx].paddr =
+				rmem->base + ab->hw_params->bdf_addr_offset;
+			ab->qmi.target_mem[idx].v.ioaddr =
+				ioremap(ab->qmi.target_mem[idx].paddr,
+					ab->qmi.target_mem[i].size);
+			if (!ab->qmi.target_mem[idx].v.ioaddr) {
+				ret = -EIO;
+				goto out;
+			}
+			ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+			ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+			idx++;
+			break;
+		case CALDB_MEM_REGION_TYPE:
+			/* Cold boot calibration is not enabled in Ath12k. Hence,
+			 * assign paddr = 0.
+			 * Once cold boot calibration is enabled add support to
+			 * assign reserved memory from DT.
+			 */
+			ab->qmi.target_mem[idx].paddr = 0;
+			ab->qmi.target_mem[idx].v.ioaddr = NULL;
+			ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+			ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+			idx++;
+			break;
+		case M3_DUMP_REGION_TYPE:
+			rmem = ath12k_core_get_reserved_mem(ab, 1);
+			if (!rmem) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			avail_rmem_size = rmem->size;
+			if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+				ath12k_dbg(ab, ATH12K_DBG_QMI,
+					   "failed to assign mem type %u req size %u avail size %zu\n",
+					   ab->qmi.target_mem[i].type,
+					   ab->qmi.target_mem[i].size,
+					   avail_rmem_size);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			ab->qmi.target_mem[idx].paddr = rmem->base;
+			ab->qmi.target_mem[idx].v.ioaddr =
+				ioremap(ab->qmi.target_mem[idx].paddr,
+					ab->qmi.target_mem[i].size);
+			if (!ab->qmi.target_mem[idx].v.ioaddr) {
+				ret = -EIO;
+				goto out;
+			}
+			ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+			ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+			idx++;
+			break;
+		default:
+			ath12k_warn(ab, "qmi ignore invalid mem req type %u\n",
+				    ab->qmi.target_mem[i].type);
+			break;
+		}
+	}
+	ab->qmi.mem_seg_count = idx;
+
+	return 0;
+out:
+	ath12k_qmi_free_target_mem_chunk(ab);
+	return ret;
 }

-static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
 {
	struct qmi_wlanfw_cap_req_msg_v01 req = {};
	struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
@@ -2452,7 +2879,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
		ab->qmi.dev_mem[i].size = resp.dev_mem[i].size;
		ath12k_dbg(ab, ATH12K_DBG_QMI,
-			   "devmem [%d] start ox%llx size %llu\n", i,
+			   "devmem [%d] start 0x%llx size %llu\n", i,
			   ab->qmi.dev_mem[i].start,
			   ab->qmi.dev_mem[i].size);
	}
@@ -2476,6 +2903,15 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
	if (r)
		ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n");

+	r = ath12k_acpi_start(ab);
+	if (r)
+		/* ACPI is optional so continue in case of an error */
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", r);
+
+	r = ath12k_acpi_check_bdf_variant_name(ab);
+	if (r)
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "ACPI bdf variant name not set.\n");
+
 out:
	return ret;
 }
@@ -2487,7 +2923,7 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
	struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {};
	struct qmi_txn txn;
	const u8 *temp = data;
-	int ret;
+	int ret = 0;
	u32 remaining = len;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -2568,8 +3004,10 @@ out:
	return ret;
 }

-static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
-				   enum ath12k_qmi_bdf_type type)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
+			    enum ath12k_qmi_bdf_type type)
 {
	struct device *dev = ab->dev;
	char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE];
@@ -2666,6 +3104,22 @@ out:
	return ret;
 }

+static void ath12k_qmi_m3_free(struct ath12k_base *ab)
+{
+	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+	if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_remoteproc)
+		return;
+
+	if (!m3_mem->vaddr)
+		return;
+
+	dma_free_coherent(ab->dev, m3_mem->size,
+			  m3_mem->vaddr, m3_mem->paddr);
+	m3_mem->vaddr = NULL;
+	m3_mem->size = 0;
+}
+
 static int ath12k_qmi_m3_load(struct ath12k_base *ab)
 {
	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
@@ -2675,10 +3129,6 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
	size_t m3_len;
	int ret;

-	if (m3_mem->vaddr)
-		/* m3 firmware buffer is already available in the DMA buffer */
-		return 0;
-
	if (ab->fw.m3_data && ab->fw.m3_len > 0) {
		/* firmware-N.bin had a m3 firmware file so use that */
		m3_data = ab->fw.m3_data;
@@ -2700,6 +3150,15 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
		m3_len = fw->size;
	}

+	/* In recovery/resume cases, M3 buffer is not freed, try to reuse that */
+	if (m3_mem->vaddr) {
+		if (m3_mem->size >= m3_len)
+			goto skip_m3_alloc;
+
+		/* Old buffer is too small, free and reallocate */
+		ath12k_qmi_m3_free(ab);
+	}
+
	m3_mem->vaddr = dma_alloc_coherent(ab->dev,
					   m3_len, &m3_mem->paddr,
					   GFP_KERNEL);
@@ -2710,6 +3169,7 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
		goto out;
	}

+skip_m3_alloc:
	memcpy(m3_mem->vaddr, m3_data, m3_len);
	m3_mem->size = m3_len;

@@ -2721,20 +3181,9 @@ out:
	return ret;
 }

-static void ath12k_qmi_m3_free(struct ath12k_base *ab)
-{
-	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
-
-	if (!m3_mem->vaddr)
-		return;
-
-	dma_free_coherent(ab->dev, m3_mem->size,
-			  m3_mem->vaddr, m3_mem->paddr);
-	m3_mem->vaddr = NULL;
-	m3_mem->size = 0;
-}
-
-static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
 {
	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
	struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
@@ -2742,15 +3191,16 @@ static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
	struct qmi_txn txn;
	int ret = 0;

-	ret = ath12k_qmi_m3_load(ab);
-	if (ret) {
-		ath12k_err(ab, "failed to load m3 firmware: %d", ret);
-		return ret;
+	if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_driver) {
+		ret = ath12k_qmi_m3_load(ab);
+		if (ret) {
+			ath12k_err(ab, "failed to load m3 firmware: %d", ret);
+			return ret;
+		}
+		req.addr = m3_mem->paddr;
+		req.size = m3_mem->size;
	}

-	req.addr = m3_mem->paddr;
-	req.size = m3_mem->size;
-
	ret = qmi_txn_init(&ab->qmi.handle, &txn,
			   qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
	if (ret < 0)
@@ -2966,6 +3416,8 @@ void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
 {
	int ret;

+	clear_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags);
+
	ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
	if (ret < 0) {
		ath12k_warn(ab, "qmi failed to send wlan mode off\n");
@@ -3022,9 +3474,69 @@ ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi,
	return 0;
 }

-static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab)
 {
-	struct ath12k_base *ab = qmi->ab;
+	struct ath12k_qmi *qmi = &ab->qmi;
+
+	spin_lock(&qmi->event_lock);
+
+	if (ath12k_qmi_get_event_block(qmi))
+		ath12k_qmi_set_event_block(qmi, false);
+
+	spin_unlock(&qmi->event_lock);
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "trigger host cap for device id %d\n",
+		   ab->device_id);
+
+	ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL);
+}
+
+static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+
+		if (!(ab && ab->qmi.num_radios != U8_MAX))
+			return false;
+	}
+
+	return true;
+}
+
+static struct ath12k_base *ath12k_qmi_hw_group_find_blocked(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	lockdep_assert_held(&ag->mutex);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		spin_lock(&ab->qmi.event_lock);
+
+		if (ath12k_qmi_get_event_block(&ab->qmi)) {
+			spin_unlock(&ab->qmi.event_lock);
+			return ab;
+		}
+
+		spin_unlock(&ab->qmi.event_lock);
+	}
+
+	return NULL;
+}
+
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+{
+	struct ath12k_base *ab = qmi->ab, *block_ab;
+	struct ath12k_hw_group *ag = ab->ag;
	int ret;

	ath12k_qmi_phy_cap_send(ab);
@@ -3035,16 +3547,30 @@ static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
		return ret;
	}

-	ret = ath12k_qmi_host_cap_send(ab);
-	if (ret < 0) {
-		ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
-		return ret;
+	spin_lock(&qmi->event_lock);
+
+	ath12k_qmi_set_event_block(qmi, true);
+
+	spin_unlock(&qmi->event_lock);
+
+	mutex_lock(&ag->mutex);
+
+	if (ath12k_qmi_hw_group_host_cap_ready(ag)) {
+		ath12k_core_hw_group_set_mlo_capable(ag);
+
+		block_ab = ath12k_qmi_hw_group_find_blocked(ag);
+		if (block_ab)
+			ath12k_qmi_trigger_host_cap(block_ab);
	}

+	mutex_unlock(&ag->mutex);
+
	return ret;
 }

-static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
 {
	struct ath12k_base *ab = qmi->ab;
	int ret;
@@ -3058,7 +3584,9 @@ static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
	return ret;
 }

-static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
 {
	struct ath12k_base *ab = qmi->ab;
	int ret;
@@ -3122,11 +3650,20 @@ static void ath12k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
			   msg->mem_seg[i].type, msg->mem_seg[i].size);
	}

-	ret = ath12k_qmi_alloc_target_mem_chunk(ab);
-	if (ret) {
-		ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
-			    ret);
-		return;
+	if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags)) {
+		ret = ath12k_qmi_assign_target_mem_chunk(ab);
+		if (ret) {
+			ath12k_warn(ab, "failed to assign qmi target memory: %d\n",
+				    ret);
+			return;
+		}
+	} else {
+		ret = ath12k_qmi_alloc_target_mem_chunk(ab);
+		if (ret) {
+			ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
+				    ret);
+			return;
+		}
	}

	ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_REQUEST_MEM, NULL);
@@ -3178,6 +3715,9 @@ static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
		.decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
		.fn = ath12k_qmi_msg_fw_ready_cb,
	},
+
+	/* end of list */
+	{},
 };

 static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
@@ -3220,6 +3760,21 @@ static const struct qmi_ops ath12k_qmi_ops = {
	.del_server = ath12k_qmi_ops_del_server,
 };

+static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi)
+{
+	struct ath12k_base *ab = qmi->ab;
+	int ret;
+
+	ret = ath12k_qmi_host_cap_send(ab);
+	if (ret < 0) {
+		ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n",
+			    ab->device_id, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
 static void ath12k_qmi_driver_event_work(struct work_struct *work)
 {
	struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
@@ -3246,7 +3801,6 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
			break;
		case ATH12K_QMI_EVENT_SERVER_EXIT:
			set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
-			set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
			break;
		case ATH12K_QMI_EVENT_REQUEST_MEM:
			ret = ath12k_qmi_event_mem_request(qmi);
@@ -3260,19 +3814,28 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
			break;
		case ATH12K_QMI_EVENT_FW_READY:
			clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
-			if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
-				ath12k_hal_dump_srng_stats(ab);
+			if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags)) {
+				if (ab->is_reset)
+					ath12k_hal_dump_srng_stats(ab);
+
+				set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
				queue_work(ab->workqueue, &ab->restart_work);
				break;
			}

			clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
-			clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
-			ath12k_core_qmi_firmware_ready(ab);
-			set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+			ret = ath12k_core_qmi_firmware_ready(ab);
+			if (!ret)
+				set_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+					&ab->dev_flags);

			break;
+		case ATH12K_QMI_EVENT_HOST_CAP:
+			ret = ath12k_qmi_event_host_cap(qmi);
+			if (ret < 0)
+				set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+			break;
		default:
			ath12k_warn(ab, "invalid event type: %d", event->type);
			break;
@@ -3325,11 +3888,15 @@ int ath12k_qmi_init_service(struct ath12k_base *ab)

 void ath12k_qmi_deinit_service(struct ath12k_base *ab)
 {
+	if (!ab->qmi.ab)
+		return;
+
	qmi_handle_release(&ab->qmi.handle);
	cancel_work_sync(&ab->qmi.event_work);
	destroy_workqueue(ab->qmi.event_wq);
	ath12k_qmi_m3_free(ab);
	ath12k_qmi_free_target_mem_chunk(ab);
+	ab->qmi.ab = NULL;
 }

 void ath12k_qmi_free_resource(struct ath12k_base *ab)
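
Reader's note (not part of the patch): the core of the multi-device change above is ath12k_host_cap_hw_link_id_init(), which gives each device in a hardware group a hw_link_id_base equal to the sum of the radio counts of all partner devices with a lower WSI index, so the MLO link IDs advertised in the host-cap request stay contiguous across the group. Below is a minimal standalone model of that arithmetic; the struct model_dev, the helper names and the three-device 2/1/2 example are invented here for illustration and are not driver code.

#include <stdio.h>

struct model_dev {
	int wsi_index;        /* position of the device on the WSI ring */
	int num_radios;       /* radio count reported via the QMI PHY capability */
	int hw_link_id_base;  /* first MLO hw link ID owned by this device */
};

/* Same loop structure as ath12k_host_cap_hw_link_id_init(): each base is the
 * sum of the radio counts of all partners with a strictly lower WSI index.
 */
static void model_hw_link_id_init(struct model_dev *devs, int num_devices)
{
	for (int i = 0; i < num_devices; i++) {
		int base = 0;

		for (int j = 0; j < num_devices; j++)
			if (devs[j].wsi_index < devs[i].wsi_index)
				base += devs[j].num_radios;

		devs[i].hw_link_id_base = base;
	}
}

int main(void)
{
	/* Hypothetical three-device group with 2, 1 and 2 radios. */
	struct model_dev devs[] = {
		{ .wsi_index = 0, .num_radios = 2 },
		{ .wsi_index = 1, .num_radios = 1 },
		{ .wsi_index = 2, .num_radios = 2 },
	};

	model_hw_link_id_init(devs, 3);

	for (int i = 0; i < 3; i++)
		printf("device %d: hw_link_id %d..%d\n", i,
		       devs[i].hw_link_id_base,
		       devs[i].hw_link_id_base + devs[i].num_radios - 1);

	return 0;
}

With the 2/1/2 example the bases come out as 0, 2 and 3, i.e. link IDs 0-1, 2 and 3-4, which is the contiguous numbering the per-partner hw_link_id[] loop in ath12k_host_cap_parse_mlo() then reports to firmware.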