Diffstat (limited to 'drivers/net/wireless/ath/ath12k')
-rw-r--r--  drivers/net/wireless/ath/ath12k/Kconfig  20
-rw-r--r--  drivers/net/wireless/ath/ath12k/Makefile  4
-rw-r--r--  drivers/net/wireless/ath/ath12k/acpi.c  396
-rw-r--r--  drivers/net/wireless/ath/ath12k/acpi.h  76
-rw-r--r--  drivers/net/wireless/ath/ath12k/ce.h  8
-rw-r--r--  drivers/net/wireless/ath/ath12k/core.c  1034
-rw-r--r--  drivers/net/wireless/ath/ath12k/core.h  410
-rw-r--r--  drivers/net/wireless/ath/ath12k/coredump.c  54
-rw-r--r--  drivers/net/wireless/ath/ath12k/coredump.h  81
-rw-r--r--  drivers/net/wireless/ath/ath12k/debug.c  6
-rw-r--r--  drivers/net/wireless/ath/ath12k/debug.h  8
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs.c  107
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs.h  34
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c  4389
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h  1494
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.c  330
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.h  67
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_mon.c  324
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_rx.c  706
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_rx.h  17
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_tx.c  252
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_tx.h  7
-rw-r--r--  drivers/net/wireless/ath/ath12k/fw.h  3
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal.c  19
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal.h  25
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal_desc.h  79
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal_rx.c  12
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal_rx.h  65
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal_tx.h  4
-rw-r--r--  drivers/net/wireless/ath/ath12k/hif.h  29
-rw-r--r--  drivers/net/wireless/ath/ath12k/htc.c  10
-rw-r--r--  drivers/net/wireless/ath/ath12k/hw.c  51
-rw-r--r--  drivers/net/wireless/ath/ath12k/hw.h  15
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.c  5660
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.h  45
-rw-r--r--  drivers/net/wireless/ath/ath12k/mhi.c  108
-rw-r--r--  drivers/net/wireless/ath/ath12k/mhi.h  7
-rw-r--r--  drivers/net/wireless/ath/ath12k/p2p.c  20
-rw-r--r--  drivers/net/wireless/ath/ath12k/p2p.h  3
-rw-r--r--  drivers/net/wireless/ath/ath12k/pci.c  298
-rw-r--r--  drivers/net/wireless/ath/ath12k/pci.h  3
-rw-r--r--  drivers/net/wireless/ath/ath12k/peer.c  238
-rw-r--r--  drivers/net/wireless/ath/ath12k/peer.h  30
-rw-r--r--  drivers/net/wireless/ath/ath12k/qmi.c  552
-rw-r--r--  drivers/net/wireless/ath/ath12k/qmi.h  25
-rw-r--r--  drivers/net/wireless/ath/ath12k/reg.c  54
-rw-r--r--  drivers/net/wireless/ath/ath12k/rx_desc.h  88
-rw-r--r--  drivers/net/wireless/ath/ath12k/trace.h  16
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.c  1490
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.h  908
-rw-r--r--  drivers/net/wireless/ath/ath12k/wow.c  1027
-rw-r--r--  drivers/net/wireless/ath/ath12k/wow.h  62
52 files changed, 18127 insertions, 2643 deletions
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
index e135d2b1b61d..52a1bb19e3da 100644
--- a/drivers/net/wireless/ath/ath12k/Kconfig
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -7,6 +7,7 @@ config ATH12K
select MHI_BUS
select QRTR
select QRTR_MHI
+ select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
help
Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
802.11be) family of chipsets, for example WCN7850 and
@@ -24,6 +25,15 @@ config ATH12K_DEBUG
If unsure, say Y to make it easier to debug problems. But if
you want optimal performance choose N.
+config ATH12K_DEBUGFS
+ bool "QTI ath12k debugfs support"
+ depends on ATH12K && MAC80211_DEBUGFS
+ help
+ Enable ath12k debugfs support
+
+ If unsure, say Y to make it easier to debug problems. But if
+ you want optimal performance choose N.
+
config ATH12K_TRACING
bool "ath12k tracing support"
depends on ATH12K && EVENT_TRACING
@@ -32,3 +42,13 @@ config ATH12K_TRACING
If unsure, say Y to make it easier to debug problems. But if
you want optimal performance choose N.
+
+config ATH12K_COREDUMP
+ bool "ath12k coredump"
+ depends on ATH12K
+ select WANT_DEV_COREDUMP
+ help
+ Enable ath12k coredump collection
+
+ If unsure, say Y to make it easier to debug problems. But if
+ dump collection not required choose N.
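
The new ATH12K_COREDUMP option selects WANT_DEV_COREDUMP, the kernel's generic device-coredump framework. A minimal, hedged sketch of how a driver typically hands a firmware dump to that framework follows; the trigger and dump contents here are illustrative, not the actual ath12k dump layout from coredump.c:

	/* Illustrative sketch only: exposing a firmware crash dump
	 * through the dev_coredump framework.
	 */
	#include <linux/devcoredump.h>
	#include <linux/vmalloc.h>

	static void example_fw_crash_dump(struct device *dev,
					  const void *fw_mem, size_t len)
	{
		void *buf = vmalloc(len);

		if (!buf)
			return;

		memcpy(buf, fw_mem, len);

		/* dev_coredumpv() takes ownership of buf (vfree'd by the
		 * framework) and exposes it under /sys/class/devcoredump/
		 * until it is read or times out.
		 */
		dev_coredumpv(dev, buf, len, GFP_KERNEL);
	}
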
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index 71669f94ff75..b5bb3e2599cd 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,7 +23,11 @@ ath12k-y += core.o \
fw.o \
p2p.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
+ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+ath12k-$(CONFIG_PM) += wow.o
+ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c
new file mode 100644
index 000000000000..0555d35aab47
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/acpi.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "acpi.h"
+#include "debug.h"
+
+static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
+{
+ union acpi_object *obj;
+ acpi_handle root_handle;
+ int ret;
+
+ root_handle = ACPI_HANDLE(ab->dev);
+ if (!root_handle) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "invalid acpi handler\n");
+ return -EOPNOTSUPP;
+ }
+
+ obj = acpi_evaluate_dsm(root_handle, ab->hw_params->acpi_guid, 0, func,
+ NULL);
+
+ if (!obj) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_evaluate_dsm() failed\n");
+ return -ENOENT;
+ }
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ ab->acpi.func_bit = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_TAS_CFG:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.tas_cfg, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_TAS_DATA:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM TAS data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.tas_sar_power_table, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_BIOS_SAR:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI BIOS SAR data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.bios_sar_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_GEO_OFFSET:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI GEO OFFSET data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.geo_offset_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_INDEX_CCA:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_CCA_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM CCA data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.cca_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM band edge data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.band_edge_power, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ }
+ } else {
+ ath12k_warn(ab, "ACPI DSM method returned an unsupported object type: %d\n",
+ obj->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
+
+static int ath12k_acpi_set_power_limit(struct ath12k_base *ab)
+{
+ const u8 *tas_sar_power_table = ab->acpi.tas_sar_power_table;
+ int ret;
+
+ if (tas_sar_power_table[0] != ATH12K_ACPI_TAS_DATA_VERSION ||
+ tas_sar_power_table[1] != ATH12K_ACPI_TAS_DATA_ENABLE) {
+ ath12k_warn(ab, "latest ACPI TAS data is invalid\n");
+ return -EINVAL;
+ }
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE,
+ tas_sar_power_table,
+ ATH12K_ACPI_DSM_TAS_DATA_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS data table: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath12k_acpi_set_bios_sar_power(struct ath12k_base *ab)
+{
+ int ret;
+
+ if (ab->acpi.bios_sar_data[0] != ATH12K_ACPI_POWER_LIMIT_VERSION ||
+ ab->acpi.bios_sar_data[1] != ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG) {
+ ath12k_warn(ab, "invalid latest ACPI BIOS SAR data\n");
+ return -EINVAL;
+ }
+
+ ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath12k_acpi_dsm_notify(acpi_handle handle, u32 event, void *data)
+{
+ int ret;
+ struct ath12k_base *ab = data;
+
+ if (event != ATH12K_ACPI_NOTIFY_EVENT) {
+ ath12k_warn(ab, "unknown acpi notify %u\n", event);
+ return;
+ }
+
+ if (!ab->acpi.acpi_tas_enable) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_tas_enable is false\n");
+ return;
+ }
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA);
+ if (ret) {
+ ath12k_warn(ab, "failed to update ACPI TAS data table: %d\n", ret);
+ return;
+ }
+
+ ret = ath12k_acpi_set_power_limit(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI TAS power limit data: %d", ret);
+ return;
+ }
+
+ if (!ab->acpi.acpi_bios_sar_enable)
+ return;
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR);
+ if (ret) {
+ ath12k_warn(ab, "failed to update BIOS SAR: %d\n", ret);
+ return;
+ }
+
+ ret = ath12k_acpi_set_bios_sar_power(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to set BIOS SAR power limit: %d\n", ret);
+ return;
+ }
+}
+
+static int ath12k_acpi_set_bios_sar_params(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_wmi_set_bios_geo_cmd(ab, ab->acpi.geo_offset_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS GEO table: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_acpi_set_tas_params(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_CONFIG_TYPE,
+ ab->acpi.tas_cfg,
+ ATH12K_ACPI_DSM_TAS_CFG_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS config table parameter: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE,
+ ab->acpi.tas_sar_power_table,
+ ATH12K_ACPI_DSM_TAS_DATA_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS data table parameter: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_acpi_start(struct ath12k_base *ab)
+{
+ acpi_status status;
+ u8 *buf;
+ int ret;
+
+ if (!ab->hw_params->acpi_guid)
+ /* not supported with this hardware */
+ return 0;
+
+ ab->acpi.acpi_tas_enable = false;
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI TAS config table: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_DATA)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI TAS data table: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG) &&
+ ab->acpi.tas_sar_power_table[0] == ATH12K_ACPI_TAS_DATA_VERSION &&
+ ab->acpi.tas_sar_power_table[1] == ATH12K_ACPI_TAS_DATA_ENABLE)
+ ab->acpi.acpi_tas_enable = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI bios sar data: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_GEO_OFFSET)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_GEO_OFFSET);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI geo offset data: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR) &&
+ ab->acpi.bios_sar_data[0] == ATH12K_ACPI_POWER_LIMIT_VERSION &&
+ ab->acpi.bios_sar_data[1] == ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG &&
+ !ab->acpi.acpi_tas_enable)
+ ab->acpi.acpi_bios_sar_enable = true;
+ }
+
+ if (ab->acpi.acpi_tas_enable) {
+ ret = ath12k_acpi_set_tas_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ab->acpi.acpi_bios_sar_enable) {
+ ret = ath12k_acpi_set_bios_sar_params(ab);
+ if (ret)
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DSM CCA threshold configuration: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION &&
+ ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] ==
+ ATH12K_ACPI_CCA_THR_ENABLE_FLAG) {
+ buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
+ buf,
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DSM band edge channel power: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION &&
+ ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) {
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_TYPE_BANDEDGE,
+ ab->acpi.band_edge_power,
+ sizeof(ab->acpi.band_edge_power));
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to set ACPI DSM band edge channel power: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev),
+ ACPI_DEVICE_NOTIFY,
+ ath12k_acpi_dsm_notify, ab);
+ if (ACPI_FAILURE(status)) {
+ ath12k_warn(ab, "failed to install DSM notify callback: %d\n", status);
+ return -EIO;
+ }
+
+ ab->acpi.started = true;
+
+ return 0;
+}
+
+void ath12k_acpi_stop(struct ath12k_base *ab)
+{
+ if (!ab->acpi.started)
+ return;
+
+ acpi_remove_notify_handler(ACPI_HANDLE(ab->dev),
+ ACPI_DEVICE_NOTIFY,
+ ath12k_acpi_dsm_notify);
+
+ memset(&ab->acpi, 0, sizeof(ab->acpi));
+}
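
ath12k_acpi_dsm_get_data() above is an instance of the usual ACPI _DSM pattern: evaluate the method for a function index, dispatch on the returned object type, validate buffer lengths before copying, and release the result with ACPI_FREE(). A condensed sketch of that pattern, with the GUID and expected length supplied by the caller as placeholders:

	static int example_dsm_read(struct device *dev, const guid_t *guid,
				    int func, void *out, size_t expected_len)
	{
		union acpi_object *obj;
		int ret = 0;

		obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, 0, func, NULL);
		if (!obj)
			return -ENOENT;

		if (obj->type != ACPI_TYPE_BUFFER ||
		    obj->buffer.length != expected_len) {
			ret = -EINVAL;
			goto out;
		}

		memcpy(out, obj->buffer.pointer, expected_len);

	out:
		ACPI_FREE(obj);	/* _DSM results are always caller-freed */
		return ret;
	}
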
diff --git a/drivers/net/wireless/ath/ath12k/acpi.h b/drivers/net/wireless/ath/ath12k/acpi.h
new file mode 100644
index 000000000000..39e003d86a48
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/acpi.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_ACPI_H
+#define ATH12K_ACPI_H
+
+#include <linux/acpi.h>
+
+#define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0
+#define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4
+#define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5
+#define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6
+#define ATH12K_ACPI_DSM_FUNC_TAS_CFG 8
+#define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9
+#define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10
+
+#define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3)
+#define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4)
+#define ATH12K_ACPI_FUNC_BIT_CCA BIT(5)
+#define ATH12K_ACPI_FUNC_BIT_TAS_CFG BIT(7)
+#define ATH12K_ACPI_FUNC_BIT_TAS_DATA BIT(8)
+#define ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER BIT(9)
+
+#define ATH12K_ACPI_NOTIFY_EVENT 0x86
+#define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func))
+
+#define ATH12K_ACPI_TAS_DATA_VERSION 0x1
+#define ATH12K_ACPI_TAS_DATA_ENABLE 0x1
+#define ATH12K_ACPI_POWER_LIMIT_VERSION 0x1
+#define ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG 0x1
+#define ATH12K_ACPI_CCA_THR_VERSION 0x1
+#define ATH12K_ACPI_CCA_THR_ENABLE_FLAG 0x1
+#define ATH12K_ACPI_BAND_EDGE_VERSION 0x1
+#define ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG 0x1
+
+#define ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET 1
+#define ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET 2
+#define ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET 5
+#define ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN 10
+#define ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET 12
+#define ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN 18
+#define ATH12K_ACPI_BIOS_SAR_TABLE_LEN 22
+#define ATH12K_ACPI_CCA_THR_OFFSET_LEN 36
+
+#define ATH12K_ACPI_DSM_TAS_DATA_SIZE 69
+#define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100
+#define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108
+
+#define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \
+ ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN)
+#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \
+ ATH12K_ACPI_BIOS_SAR_TABLE_LEN)
+#define ATH12K_ACPI_DSM_CCA_DATA_SIZE (ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET + \
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN)
+
+#ifdef CONFIG_ACPI
+
+int ath12k_acpi_start(struct ath12k_base *ab);
+void ath12k_acpi_stop(struct ath12k_base *ab);
+
+#else
+
+static inline int ath12k_acpi_start(struct ath12k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath12k_acpi_stop(struct ath12k_base *ab)
+{
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif /* ATH12K_ACPI_H */
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
index 79af3b6159f1..1a14b9fb86b8 100644
--- a/drivers/net/wireless/ath/ath12k/ce.h
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CE_H
@@ -119,7 +119,7 @@ struct ath12k_ce_ring {
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
- u32 base_addr_ce_space_unaligned;
+ dma_addr_t base_addr_ce_space_unaligned;
/* Actual start of descriptors.
* Aligned to descriptor-size boundary.
@@ -129,7 +129,7 @@ struct ath12k_ce_ring {
void *base_addr_owner_space;
/* CE address space */
- u32 base_addr_ce_space;
+ dma_addr_t base_addr_ce_space;
/* HAL ring id */
u32 hal_ring_id;
@@ -148,7 +148,7 @@ struct ath12k_ce_pipe {
void (*send_cb)(struct ath12k_ce_pipe *pipe);
void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb);
- struct tasklet_struct intr_tq;
+ struct work_struct intr_wq;
struct ath12k_ce_ring *src_ring;
struct ath12k_ce_ring *dest_ring;
struct ath12k_ce_ring *status_ring;
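
Note the ce.h change from a per-pipe tasklet (intr_tq) to a work_struct (intr_wq), in line with the kernel-wide move away from tasklets. A generic sketch of such a conversion; the handler name and the choice of system workqueue are assumptions, not the exact ath12k wiring:

	#include <linux/workqueue.h>

	struct example_pipe {
		struct work_struct intr_wq;	/* was: struct tasklet_struct intr_tq */
	};

	static void example_pipe_work(struct work_struct *work)
	{
		struct example_pipe *pipe =
			container_of(work, struct example_pipe, intr_wq);

		/* bottom-half processing that used to run in the tasklet */
		(void)pipe;
	}

	/* setup: INIT_WORK() replaces tasklet_setup() */
	static void example_pipe_init(struct example_pipe *pipe)
	{
		INIT_WORK(&pipe->intr_wq, example_pipe_work);
	}

	/* from the interrupt handler: queue_work() replaces tasklet_schedule() */
	static void example_pipe_irq(struct example_pipe *pipe)
	{
		queue_work(system_highpri_wq, &pipe->intr_wq);
	}
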
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 391b6fb2bd42..0606116d6b9c 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -9,17 +9,25 @@
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
#include "fw.h"
+#include "debugfs.h"
+#include "wow.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
+/* protected with ath12k_hw_group_mutex */
+static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
+
+static DEFINE_MUTEX(ath12k_hw_group_mutex);
+
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
struct ath12k *ar;
@@ -41,69 +49,127 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
return ret;
}
-int ath12k_core_suspend(struct ath12k_base *ab)
+/* Check if we need to continue with suspend/resume operation.
+ * Return:
+ * a negative value: error happens and don't continue.
+ * 0: no error but don't continue.
+ * positive value: no error and do continue.
+ */
+static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
{
- int ret;
+ struct ath12k *ar;
if (!ab->hw_params->supports_suspend)
return -EOPNOTSUPP;
- /* TODO: there can frames in queues so for now add delay as a hack.
- * Need to implement to handle and remove this delay.
+ /* so far single_pdev_only chips have supports_suspend as true
+ * so pass 0 as a dummy pdev_id here.
*/
- msleep(500);
+ ar = ab->pdevs[0].ar;
+ if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
+ return 0;
- ret = ath12k_dp_rx_pktlog_stop(ab, true);
- if (ret) {
- ath12k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
- ret);
+ return 1;
+}
+
+int ath12k_core_suspend(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ int ret, i;
+
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
return ret;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ if (!ar)
+ continue;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
+ ret = ath12k_mac_wait_tx_complete(ar);
+ if (ret) {
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+ ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
}
- ret = ath12k_dp_rx_pktlog_stop(ab, false);
- if (ret) {
- ath12k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
- ret);
+ /* PM framework skips suspend_late/resume_early callbacks
+ * if other devices report errors in their suspend callbacks.
+ * However ath12k_core_resume() would still be called because
+ * here we return success thus kernel put us on dpm_suspended_list.
+ * Since we won't go through a power down/up cycle, there is
+ * no chance to call complete(&ab->restart_completed) in
+ * ath12k_core_restart(), making ath12k_core_resume() timeout.
+ * So call it here to avoid this issue. This also works in case
+ * no error happens thus suspend_late/resume_early get called,
+ * because it will be reinitialized in ath12k_core_resume_early().
+ */
+ complete(&ab->restart_completed);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath12k_core_suspend);
+
+int ath12k_core_suspend_late(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
return ret;
- }
+
+ ath12k_acpi_stop(ab);
ath12k_hif_irq_disable(ab);
ath12k_hif_ce_irq_disable(ab);
- ret = ath12k_hif_suspend(ab);
- if (ret) {
- ath12k_warn(ab, "failed to suspend hif: %d\n", ret);
- return ret;
- }
+ ath12k_hif_power_down(ab, true);
return 0;
}
+EXPORT_SYMBOL(ath12k_core_suspend_late);
-int ath12k_core_resume(struct ath12k_base *ab)
+int ath12k_core_resume_early(struct ath12k_base *ab)
{
int ret;
- if (!ab->hw_params->supports_suspend)
- return -EOPNOTSUPP;
-
- ret = ath12k_hif_resume(ab);
- if (ret) {
- ath12k_warn(ab, "failed to resume hif during resume: %d\n", ret);
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
return ret;
- }
- ath12k_hif_ce_irq_enable(ab);
- ath12k_hif_irq_enable(ab);
+ reinit_completion(&ab->restart_completed);
+ ret = ath12k_hif_power_up(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
- ret = ath12k_dp_rx_pktlog_start(ab);
- if (ret) {
- ath12k_warn(ab, "failed to start rx pktlog during resume: %d\n",
- ret);
+ return ret;
+}
+EXPORT_SYMBOL(ath12k_core_resume_early);
+
+int ath12k_core_resume(struct ath12k_base *ab)
+{
+ long time_left;
+ int ret;
+
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
return ret;
+
+ time_left = wait_for_completion_timeout(&ab->restart_completed,
+ ATH12K_RESET_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath12k_warn(ab, "timeout while waiting for restart complete");
+ return -ETIMEDOUT;
}
return 0;
}
+EXPORT_SYMBOL(ath12k_core_resume);
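
ath12k_core_continue_suspend_resume() intentionally returns three kinds of values, as its comment documents, so all four PM callbacks can share a single guard. A minimal sketch of the caller idiom, assuming that convention:

	/* Minimal sketch of the tri-state guard used by the PM callbacks:
	 *   < 0 : error, abort the callback
	 *     0 : benign no-op (e.g. radio never started), report success
	 *   > 0 : carry on with the real suspend/resume work
	 */
	static int example_pm_callback(struct ath12k_base *ab)
	{
		int ret;

		ret = ath12k_core_continue_suspend_resume(ab);
		if (ret <= 0)
			return ret;

		/* ... actual suspend or resume steps go here ... */

		return 0;
	}
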
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len, bool with_variant,
@@ -539,12 +605,17 @@ u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
static void ath12k_core_stop(struct ath12k_base *ab)
{
+ ath12k_core_stopped(ab);
+
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
+ ath12k_acpi_stop(ab);
+
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
- ath12k_dp_rx_pdev_reo_cleanup(ab);
+ ath12k_dp_free(ab);
/* De-Init of components as needed */
}
@@ -628,6 +699,8 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
return ret;
}
+ ath12k_debugfs_soc_create(ab);
+
ret = ath12k_hif_power_up(ab);
if (ret) {
ath12k_err(ab, "failed to power up :%d\n", ret);
@@ -637,14 +710,16 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
return 0;
err_qmi_deinit:
+ ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
return ret;
}
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
- ath12k_dp_free(ab);
+ ath12k_hif_power_down(ab, false);
ath12k_reg_free(ab);
+ ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
}
@@ -652,30 +727,17 @@ static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
int ret;
- ret = ath12k_mac_register(ab);
- if (ret) {
- ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
- return ret;
- }
-
ret = ath12k_dp_pdev_alloc(ab);
if (ret) {
ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
- goto err_mac_unregister;
+ return ret;
}
return 0;
-
-err_mac_unregister:
- ath12k_mac_unregister(ab);
-
- return ret;
}
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
- ath12k_mac_unregister(ab);
- ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
}
@@ -684,6 +746,8 @@ static int ath12k_core_start(struct ath12k_base *ab,
{
int ret;
+ lockdep_assert_held(&ab->core_lock);
+
ret = ath12k_wmi_attach(ab);
if (ret) {
ath12k_err(ab, "failed to attach wmi: %d\n", ret);
@@ -733,19 +797,12 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_hif_stop;
}
- ret = ath12k_mac_allocate(ab);
- if (ret) {
- ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
- ret);
- goto err_hif_stop;
- }
-
ath12k_dp_cc_config(ab);
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
- goto err_mac_destroy;
+ goto err_hif_stop;
}
ath12k_dp_hal_rx_desc_init(ab);
@@ -779,12 +836,19 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_reo_cleanup;
}
+ ret = ath12k_acpi_start(ab);
+ if (ret)
+ /* ACPI is optional so continue in case of an error */
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+
+ if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
+ /* Indicate the core start in the appropriate group */
+ ath12k_core_started(ab);
+
return 0;
err_reo_cleanup:
ath12k_dp_rx_pdev_reo_cleanup(ab);
-err_mac_destroy:
- ath12k_mac_destroy(ab);
err_hif_stop:
ath12k_hif_stop(ab);
err_wmi_detach:
@@ -792,6 +856,169 @@ err_wmi_detach:
return ret;
}
+static void ath12k_core_device_cleanup(struct ath12k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath12k_hif_irq_disable(ab);
+ ath12k_core_pdev_destroy(ab);
+
+ mutex_unlock(&ab->core_lock);
+}
+
+static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+ ath12k_mac_unregister(ag);
+
+ for (i = ag->num_devices - 1; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+ ath12k_core_device_cleanup(ab);
+ }
+
+ ath12k_mac_destroy(ag);
+}
+
+static int __ath12k_mac_mlo_ready(struct ath12k *ar)
+{
+ int ret;
+
+ ret = ath12k_wmi_mlo_ready(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
+ ar->pdev_idx);
+
+ return 0;
+}
+
+int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
+{
+ struct ath12k_hw *ah;
+ struct ath12k *ar;
+ int ret;
+ int i, j;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for_each_ar(ah, ar, j) {
+ ar = &ah->radio[j];
+ ret = __ath12k_mac_mlo_ready(ar);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
+{
+ int ret, i;
+
+ if (!ag->mlo_capable || ag->num_devices == 1)
+ return 0;
+
+ ret = ath12k_mac_mlo_setup(ag);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ag->num_devices; i++)
+ ath12k_dp_partner_cc_init(ag->ab[i]);
+
+ ret = ath12k_mac_mlo_ready(ag);
+ if (ret)
+ goto err_mlo_teardown;
+
+ return 0;
+
+err_mlo_teardown:
+ ath12k_mac_mlo_teardown(ag);
+
+ return ret;
+}
+
+static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int ret, i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
+ goto core_pdev_create;
+
+ ret = ath12k_mac_allocate(ag);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = ath12k_core_mlo_setup(ag);
+ if (WARN_ON(ret))
+ goto err_mac_destroy;
+
+ ret = ath12k_mac_register(ag);
+ if (WARN_ON(ret))
+ goto err_mlo_teardown;
+
+ set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+core_pdev_create:
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ ret = ath12k_core_pdev_create(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create pdev core %d\n", ret);
+ mutex_unlock(&ab->core_lock);
+ goto err;
+ }
+
+ ath12k_hif_irq_enable(ab);
+
+ ret = ath12k_core_rfkill_config(ab);
+ if (ret && ret != -EOPNOTSUPP) {
+ mutex_unlock(&ab->core_lock);
+ goto err;
+ }
+
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return 0;
+
+err:
+ ath12k_core_hw_group_stop(ag);
+ return ret;
+
+err_mlo_teardown:
+ ath12k_mac_mlo_teardown(ag);
+
+err_mac_destroy:
+ ath12k_mac_destroy(ag);
+
+ return ret;
+}
+
static int ath12k_core_start_firmware(struct ath12k_base *ab,
enum ath12k_firmware_mode mode)
{
@@ -809,9 +1036,37 @@ static int ath12k_core_start_firmware(struct ath12k_base *ab,
return ret;
}
+static inline
+bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
+{
+ lockdep_assert_held(&ag->mutex);
+
+ return (ag->num_started == ag->num_devices);
+}
+
+static void ath12k_core_trigger_partner(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_base *partner_ab;
+ bool found = false;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+ if (!partner_ab)
+ continue;
+
+ if (found)
+ ath12k_qmi_trigger_host_cap(partner_ab);
+
+ found = (partner_ab == ab);
+ }
+}
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
- int ret;
+ struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+ int ret, i;
ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
@@ -831,41 +1086,52 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
goto err_firmware_stop;
}
+ mutex_lock(&ag->mutex);
mutex_lock(&ab->core_lock);
+
ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
}
- ret = ath12k_core_pdev_create(ab);
- if (ret) {
- ath12k_err(ab, "failed to create pdev core: %d\n", ret);
- goto err_core_stop;
- }
- ath12k_hif_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
- ret = ath12k_core_rfkill_config(ab);
- if (ret && ret != -EOPNOTSUPP) {
- ath12k_err(ab, "failed to config rfkill: %d\n", ret);
- goto err_core_pdev_destroy;
+ if (ath12k_core_hw_group_start_ready(ag)) {
+ ret = ath12k_core_hw_group_start(ag);
+ if (ret) {
+ ath12k_warn(ab, "unable to start hw group\n");
+ goto err_core_stop;
+ }
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
+ } else {
+ ath12k_core_trigger_partner(ab);
}
- mutex_unlock(&ab->core_lock);
+ mutex_unlock(&ag->mutex);
return 0;
-err_core_pdev_destroy:
- ath12k_core_pdev_destroy(ab);
err_core_stop:
- ath12k_core_stop(ab);
- ath12k_mac_destroy(ab);
+ for (i = ag->num_devices - 1; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_stop(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+ goto exit;
+
err_dp_free:
ath12k_dp_free(ab);
mutex_unlock(&ab->core_lock);
err_firmware_stop:
ath12k_qmi_firmware_stop(ab);
+exit:
+ mutex_unlock(&ag->mutex);
return ret;
}
@@ -874,9 +1140,8 @@ static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
int ret;
mutex_lock(&ab->core_lock);
- ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
- ath12k_hif_stop(ab);
+ ath12k_ce_cleanup_pipes(ab);
ath12k_wmi_detach(ab);
ath12k_dp_rx_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
@@ -908,6 +1173,7 @@ err_hal_srng_deinit:
static void ath12k_rfkill_work(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_hw *ah;
struct ieee80211_hw *hw;
@@ -918,8 +1184,8 @@ static void ath12k_rfkill_work(struct work_struct *work)
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah)
continue;
@@ -940,7 +1206,7 @@ void ath12k_core_halt(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ar->num_created_vdevs = 0;
ar->allocated_vdev_map = 0;
@@ -959,10 +1225,10 @@ void ath12k_core_halt(struct ath12k *ar)
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
- struct ath12k_pdev *pdev;
struct ath12k_hw *ah;
- int i;
+ int i, j;
spin_lock_bh(&ab->base_lock);
ab->stats.fw_crash_counter++;
@@ -971,36 +1237,33 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
- for (i = 0; i < ab->num_hw; i++) {
- if (!ab->ah[i])
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
- ah = ab->ah[i];
ieee80211_stop_queues(ah->hw);
- }
-
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar || ar->state == ATH12K_STATE_OFF)
- continue;
- ath12k_mac_drain_tx(ar);
- complete(&ar->scan.started);
- complete(&ar->scan.completed);
- complete(&ar->scan.on_channel);
- complete(&ar->peer_assoc_done);
- complete(&ar->peer_delete_done);
- complete(&ar->install_key_done);
- complete(&ar->vdev_setup_done);
- complete(&ar->vdev_delete_done);
- complete(&ar->bss_survey_done);
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
- wake_up(&ar->dp.tx_empty_waitq);
- idr_for_each(&ar->txmgmt_idr,
- ath12k_mac_tx_mgmt_pending_free, ar);
- idr_destroy(&ar->txmgmt_idr);
- wake_up(&ar->txmgmt_empty_waitq);
+ ath12k_mac_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->peer_delete_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->vdev_delete_done);
+ complete(&ar->bss_survey_done);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath12k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ wake_up(&ar->txmgmt_empty_waitq);
+ }
}
wake_up(&ab->wmi_ab.tx_credits_wq);
@@ -1009,51 +1272,58 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_hw *ah;
struct ath12k *ar;
- struct ath12k_pdev *pdev;
- int i;
+ int i, j;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar || ar->state == ATH12K_STATE_OFF)
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
- mutex_lock(&ar->conf_mutex);
+ wiphy_lock(ah->hw->wiphy);
+ mutex_lock(&ah->hw_mutex);
+
+ switch (ah->state) {
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_RESTARTING;
+
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
+ ath12k_core_halt(ar);
+ }
- switch (ar->state) {
- case ATH12K_STATE_ON:
- ar->state = ATH12K_STATE_RESTARTING;
- ath12k_core_halt(ar);
- ieee80211_restart_hw(ath12k_ar_to_hw(ar));
break;
- case ATH12K_STATE_OFF:
+ case ATH12K_HW_STATE_OFF:
ath12k_warn(ab,
- "cannot restart radio %d that hasn't been started\n",
+ "cannot restart hw %d that hasn't been started\n",
i);
break;
- case ATH12K_STATE_RESTARTING:
+ case ATH12K_HW_STATE_RESTARTING:
break;
- case ATH12K_STATE_RESTARTED:
- ar->state = ATH12K_STATE_WEDGED;
+ case ATH12K_HW_STATE_RESTARTED:
+ ah->state = ATH12K_HW_STATE_WEDGED;
fallthrough;
- case ATH12K_STATE_WEDGED:
+ case ATH12K_HW_STATE_WEDGED:
ath12k_warn(ab,
- "device is wedged, will not restart radio %d\n", i);
+ "device is wedged, will not restart hw %d\n", i);
break;
}
- mutex_unlock(&ar->conf_mutex);
+
+ mutex_unlock(&ah->hw_mutex);
+ wiphy_unlock(ah->hw->wiphy);
}
+
complete(&ab->driver_recovery);
}
static void ath12k_core_restart(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
- int ret;
-
- if (!ab->is_reset)
- ath12k_core_pre_reconfigure_recovery(ab);
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_hw *ah;
+ int ret, i;
ret = ath12k_core_reconfigure_on_crash(ab);
if (ret) {
@@ -1061,11 +1331,22 @@ static void ath12k_core_restart(struct work_struct *work)
return;
}
- if (ab->is_reset)
- complete_all(&ab->reconfigure_complete);
+ if (ab->is_reset) {
+ if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
+ }
- if (!ab->is_reset)
- ath12k_core_post_reconfigure_recovery(ab);
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ab->ag, i);
+ ieee80211_restart_hw(ah->hw);
+ }
+ }
+
+ complete(&ab->restart_completed);
}
static void ath12k_core_reset(struct work_struct *work)
@@ -1074,7 +1355,7 @@ static void ath12k_core_reset(struct work_struct *work)
int reset_count, fail_cont_count;
long time_left;
- if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
+ if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
return;
}
@@ -1117,22 +1398,19 @@ static void ath12k_core_reset(struct work_struct *work)
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
ab->is_reset = true;
- atomic_set(&ab->recovery_start_count, 0);
- reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_count, 0);
+ ath12k_coredump_collect(ab);
ath12k_core_pre_reconfigure_recovery(ab);
- reinit_completion(&ab->reconfigure_complete);
ath12k_core_post_reconfigure_recovery(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
- time_left = wait_for_completion_timeout(&ab->recovery_start,
- ATH12K_RECOVER_START_TIMEOUT_HZ);
+ ath12k_hif_irq_disable(ab);
+ ath12k_hif_ce_irq_disable(ab);
- ath12k_hif_power_down(ab);
- ath12k_qmi_free_resource(ab);
+ ath12k_hif_power_down(ab, false);
ath12k_hif_power_up(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
@@ -1153,32 +1431,453 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return 0;
}
-int ath12k_core_init(struct ath12k_base *ab)
+static int ath12k_core_panic_handler(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- int ret;
+ struct ath12k_base *ab = container_of(nb, struct ath12k_base,
+ panic_nb);
- ret = ath12k_core_soc_create(ab);
- if (ret) {
- ath12k_err(ab, "failed to create soc core: %d\n", ret);
- return ret;
+ return ath12k_hif_panic_handler(ab);
+}
+
+static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
+{
+ ab->panic_nb.notifier_call = ath12k_core_panic_handler;
+
+ return atomic_notifier_chain_register(&panic_notifier_list,
+ &ab->panic_nb);
+}
+
+static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &ab->panic_nb);
+}
+
+static inline
+bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
+{
+ lockdep_assert_held(&ag->mutex);
+
+ return (ag->num_probed == ag->num_devices);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int count = 0;
+
+ lockdep_assert_held(&ath12k_hw_group_mutex);
+
+ list_for_each_entry(ag, &ath12k_hw_group_list, list)
+ count++;
+
+ ag = kzalloc(sizeof(*ag), GFP_KERNEL);
+ if (!ag)
+ return NULL;
+
+ ag->id = count;
+ list_add(&ag->list, &ath12k_hw_group_list);
+ mutex_init(&ag->mutex);
+ ag->mlo_capable = false;
+
+ return ag;
+}
+
+static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
+{
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ list_del(&ag->list);
+ kfree(ag);
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int i;
+
+ if (!ab->dev->of_node)
+ return NULL;
+
+ list_for_each_entry(ag, &ath12k_hw_group_list, list)
+ for (i = 0; i < ag->num_devices; i++)
+ if (ag->wsi_node[i] == ab->dev->of_node)
+ return ag;
+
+ return NULL;
+}
+
+static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
+ struct ath12k_base *ab)
+{
+ struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
+ struct device_node *tx_endpoint, *next_rx_endpoint;
+ int device_count = 0;
+
+ next_wsi_dev = wsi_dev;
+
+ if (!next_wsi_dev)
+ return -ENODEV;
+
+ do {
+ ag->wsi_node[device_count] = next_wsi_dev;
+
+ tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
+ if (!tx_endpoint) {
+ of_node_put(next_wsi_dev);
+ return -ENODEV;
+ }
+
+ next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
+ if (!next_rx_endpoint) {
+ of_node_put(next_wsi_dev);
+ of_node_put(tx_endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(tx_endpoint);
+ of_node_put(next_wsi_dev);
+
+ next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
+ if (!next_wsi_dev) {
+ of_node_put(next_rx_endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(next_rx_endpoint);
+
+ device_count++;
+ if (device_count > ATH12K_MAX_SOCS) {
+ ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
+ device_count, ATH12K_MAX_SOCS);
+ of_node_put(next_wsi_dev);
+ return -EINVAL;
+ }
+ } while (wsi_dev != next_wsi_dev);
+
+ of_node_put(next_wsi_dev);
+ ag->num_devices = device_count;
+
+ return 0;
+}
+
+static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
+ struct ath12k_base *ab)
+{
+ int i, wsi_controller_index = -1, node_index = -1;
+ bool control;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
+ if (control)
+ wsi_controller_index = i;
+
+ if (ag->wsi_node[i] == ab->dev->of_node)
+ node_index = i;
}
+ if (wsi_controller_index == -1) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
+ return -EINVAL;
+ }
+
+ if (node_index == -1) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
+ return -EINVAL;
+ }
+
+ ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
+ ag->num_devices;
+
return 0;
}
-void ath12k_core_deinit(struct ath12k_base *ab)
+static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
- mutex_lock(&ab->core_lock);
+ struct ath12k_wsi_info *wsi = &ab->wsi_info;
+ struct ath12k_hw_group *ag;
+
+ lockdep_assert_held(&ath12k_hw_group_mutex);
+
+ /* The grouping of multiple devices will be done based on device tree file.
+ * The platforms that do not have any valid group information would have
+ * each device to be part of its own invalid group.
+ *
+ * We use group id ATH12K_INVALID_GROUP_ID for single device group
+ * which didn't have dt entry or wrong dt entry, there could be many
+ * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
+ * default group id of ATH12K_INVALID_GROUP_ID combined with
+ * num devices in ath12k_hw_group determines if the group is
+ * multi device or single device group
+ */
- ath12k_core_pdev_destroy(ab);
- ath12k_core_stop(ab);
+ ag = ath12k_core_hw_group_find_by_dt(ab);
+ if (!ag) {
+ ag = ath12k_core_hw_group_alloc(ab);
+ if (!ag) {
+ ath12k_warn(ab, "unable to create new hw group\n");
+ return NULL;
+ }
- mutex_unlock(&ab->core_lock);
+ if (ath12k_core_get_wsi_info(ag, ab) ||
+ ath12k_core_get_wsi_index(ag, ab)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "unable to get wsi info from dt, grouping single device");
+ ag->id = ATH12K_INVALID_GROUP_ID;
+ ag->num_devices = 1;
+ memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
+ wsi->index = 0;
+ }
- ath12k_hif_power_down(ab);
- ath12k_mac_destroy(ab);
- ath12k_core_soc_destroy(ab);
- ath12k_fw_unmap(ab);
+ goto exit;
+ } else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
+ ag->id);
+ goto invalid_group;
+ } else {
+ if (ath12k_core_get_wsi_index(ag, ab))
+ goto invalid_group;
+ goto exit;
+ }
+
+invalid_group:
+ ag = ath12k_core_hw_group_alloc(ab);
+ if (!ag) {
+ ath12k_warn(ab, "unable to create new hw group\n");
+ return NULL;
+ }
+
+ ag->id = ATH12K_INVALID_GROUP_ID;
+ ag->num_devices = 1;
+ wsi->index = 0;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
+
+exit:
+ if (ag->num_probed >= ag->num_devices) {
+ ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
+ goto invalid_group;
+ }
+
+ ab->device_id = ag->num_probed++;
+ ag->ab[ab->device_id] = ab;
+ ab->ag = ag;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
+ ag->id, ag->num_devices, wsi->index);
+
+ return ag;
+}
+
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+ u8 device_id = ab->device_id;
+ int num_probed;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ if (WARN_ON(device_id >= ag->num_devices)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ if (WARN_ON(ag->ab[device_id] != ab)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ ag->ab[device_id] = NULL;
+ ab->ag = NULL;
+ ab->device_id = ATH12K_INVALID_DEVICE_ID;
+
+ if (ag->num_probed)
+ ag->num_probed--;
+
+ num_probed = ag->num_probed;
+
+ mutex_unlock(&ag->mutex);
+
+ if (!num_probed)
+ ath12k_core_hw_group_free(ag);
+}
+
+static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (WARN_ON(!ag))
+ return;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_core_soc_destroy(ab);
+ }
+}
+
+static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);
+
+ ath12k_core_hw_group_stop(ag);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_stop(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+
+ mutex_unlock(&ag->mutex);
+}
+
+static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i, ret;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ ret = ath12k_core_soc_create(ab);
+ if (ret) {
+ mutex_unlock(&ab->core_lock);
+ ath12k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return 0;
+}
+
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ /* If more than one devices are grouped, then inter MLO
+ * functionality can work still independent of whether internally
+ * each device supports single_chip_mlo or not.
+ * Only when there is one device, then it depends whether the
+ * device can support intra chip MLO or not
+ */
+ if (ag->num_devices > 1) {
+ ag->mlo_capable = true;
+ } else {
+ ab = ag->ab[0];
+ ag->mlo_capable = ab->single_chip_mlo_supp;
+
+ /* WCN chipsets does not advertise in firmware features
+ * hence skip checking
+ */
+ if (ab->hw_params->def_num_link)
+ return;
+ }
+
+ if (!ag->mlo_capable)
+ return;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ /* even if 1 device's firmware feature indicates MLO
+ * unsupported, make MLO unsupported for the whole group
+ */
+ if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) {
+ ag->mlo_capable = false;
+ return;
+ }
+ }
+}
+
+int ath12k_core_init(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int ret;
+
+ ret = ath12k_core_panic_notifier_register(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ ag = ath12k_core_hw_group_assign(ab);
+ if (!ag) {
+ mutex_unlock(&ath12k_hw_group_mutex);
+ ath12k_warn(ab, "unable to get hw group\n");
+ return -ENODEV;
+ }
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+
+ mutex_lock(&ag->mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
+ ag->num_devices, ag->num_probed);
+
+ if (ath12k_core_hw_group_create_ready(ag)) {
+ ret = ath12k_core_hw_group_create(ag);
+ if (ret) {
+ mutex_unlock(&ag->mutex);
+ ath12k_warn(ab, "unable to create hw group\n");
+ goto err;
+ }
+ }
+
+ mutex_unlock(&ag->mutex);
+
+ return 0;
+
+err:
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
+ return ret;
+}
+
+void ath12k_core_deinit(struct ath12k_base *ab)
+{
+ ath12k_core_panic_notifier_unregister(ab);
+ ath12k_core_hw_group_cleanup(ab->ag);
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1211,8 +1910,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
mutex_init(&ab->core_lock);
spin_lock_init(&ab->base_lock);
init_completion(&ab->reset_complete);
- init_completion(&ab->reconfigure_complete);
- init_completion(&ab->recovery_start);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
@@ -1220,14 +1917,27 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath12k_core_restart);
INIT_WORK(&ab->reset_work, ath12k_core_reset);
INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
+ INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
+ init_completion(&ab->restart_completed);
+ init_completion(&ab->wow.wakeup_completed);
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
- ab->slo_capable = true;
+ ab->single_chip_mlo_supp = false;
+
+ /* Device index used to identify the devices in a group.
+ *
+ * In Intra-device MLO, only one device present in a group,
+ * so it is always zero.
+ *
+ * In Inter-device MLO, Multiple device present in a group,
+ * expect non-zero value.
+ */
+ ab->device_id = 0;
return ab;
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 97e5a0ccd233..ee595794a7ae 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CORE_H
@@ -14,6 +14,7 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/panic_notifier.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -26,6 +27,10 @@
#include "reg.h"
#include "dbring.h"
#include "fw.h"
+#include "acpi.h"
+#include "wow.h"
+#include "debugfs_htt_stats.h"
+#include "coredump.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -46,6 +51,7 @@
#define ATH12K_SMBIOS_BDF_EXT_MAGIC "BDF_"
#define ATH12K_INVALID_HW_MAC_ID 0xFF
+#define ATH12K_CONNECTION_LOSS_HZ (3 * HZ)
#define ATH12K_RX_RATE_TABLE_NUM 320
#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
@@ -57,6 +63,14 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+#define ATH12K_MAX_SOCS 3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS)
+#define ATH12K_INVALID_GROUP_ID 0xFF
+#define ATH12K_INVALID_DEVICE_ID 0xFF
+
+#define ATH12K_MAX_MLO_PEERS 256
+#define ATH12K_MLO_PEER_ID_INVALID 0xFFFF
+
enum ath12k_bdf_search {
ATH12K_BDF_SEARCH_DEFAULT,
ATH12K_BDF_SEARCH_BUS_AND_BOARD,
@@ -89,6 +103,14 @@ static inline enum wme_ac ath12k_tid_to_ac(u32 tid)
WME_AC_VO);
}
+static inline u64 ath12k_le32hilo_to_u64(__le32 hi, __le32 lo)
+{
+ u64 hi64 = le32_to_cpu(hi);
+ u64 lo64 = le32_to_cpu(lo);
+
+ return (hi64 << 32) | lo64;
+}
+
enum ath12k_skb_flags {
ATH12K_SKB_HW_80211_ENCAP = BIT(0),
ATH12K_SKB_CIPHER_SET = BIT(1),
@@ -101,6 +123,7 @@ struct ath12k_skb_cb {
dma_addr_t paddr_ext_desc;
u32 cipher;
u8 flags;
+ u8 link_id;
};
struct ath12k_skb_rxcb {
@@ -113,7 +136,7 @@ struct ath12k_skb_rxcb {
struct hal_rx_desc *rx_desc;
u8 err_rel_src;
u8 err_code;
- u8 mac_id;
+ u8 hw_link_id;
u8 unmapped;
u8 is_frag;
u8 tid;
@@ -144,7 +167,7 @@ struct ath12k_ext_irq_grp {
u32 grp_id;
u64 timestamp;
struct napi_struct napi;
- struct net_device napi_ndev;
+ struct net_device *napi_ndev;
};
struct ath12k_smbios_bdf {
@@ -178,8 +201,6 @@ struct ath12k_he {
u32 heop_param;
};
-#define MAX_RADIOS 3
-
enum {
WMI_HOST_TP_SCALE_MAX = 0,
WMI_HOST_TP_SCALE_50 = 1,
@@ -196,8 +217,13 @@ enum ath12k_scan_state {
ATH12K_SCAN_ABORTING,
};
+enum ath12k_hw_group_flags {
+ ATH12K_GROUP_FLAG_REGISTERED,
+ ATH12K_GROUP_FLAG_UNREGISTER,
+};
+
enum ath12k_dev_flags {
- ATH12K_CAC_RUNNING,
+ ATH12K_FLAG_CAC_RUNNING,
ATH12K_FLAG_CRASH_FLUSH,
ATH12K_FLAG_RAW_MODE,
ATH12K_FLAG_HW_CRYPTO_DISABLED,
@@ -208,16 +234,37 @@ enum ath12k_dev_flags {
ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
ATH12K_FLAG_CE_IRQ_ENABLED,
ATH12K_FLAG_EXT_IRQ_ENABLED,
+ ATH12K_FLAG_QMI_FW_READY_COMPLETE,
};
-enum ath12k_monitor_flags {
- ATH12K_FLAG_MONITOR_ENABLED,
+struct ath12k_tx_conf {
+ bool changed;
+ u16 ac;
+ struct ieee80211_tx_queue_params tx_queue_params;
};
-struct ath12k_vif {
+struct ath12k_key_conf {
+ enum set_key_cmd cmd;
+ struct list_head list;
+ struct ieee80211_sta *sta;
+ struct ieee80211_key_conf *key;
+};
+
+struct ath12k_vif_cache {
+ struct ath12k_tx_conf tx_conf;
+ struct ath12k_key_conf key_conf;
+ u32 bss_conf_changed;
+};
+
+struct ath12k_rekey_data {
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KEK_LEN];
+ u64 replay_ctr;
+ bool enable_offload;
+};
+
+struct ath12k_link_vif {
u32 vdev_id;
- enum wmi_vdev_type vdev_type;
- enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
u16 ast_hash;
@@ -227,13 +274,38 @@ struct ath12k_vif {
u8 search_type;
struct ath12k *ar;
- struct ieee80211_vif *vif;
int bank_id;
u8 vdev_id_check_en;
struct wmi_wmm_params_all_arg wmm_params;
struct list_head list;
+
+ bool is_created;
+ bool is_started;
+ bool is_up;
+ u8 bssid[ETH_ALEN];
+ struct cfg80211_bitrate_mask bitrate_mask;
+ struct delayed_work connection_loss_work;
+ int num_legacy_stations;
+ int rtscts_prot_mode;
+ int txpower;
+ bool rsnie_present;
+ bool wpaie_present;
+ struct ieee80211_chanctx_conf chanctx;
+ u8 vdev_stats_id;
+ u32 punct_bitmap;
+ u8 link_id;
+ struct ath12k_vif *ahvif;
+ struct ath12k_rekey_data rekey_data;
+};
+
+struct ath12k_vif {
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ struct ieee80211_vif *vif;
+ struct ath12k_hw *ah;
+
union {
struct {
u32 uapsd;
@@ -251,27 +323,30 @@ struct ath12k_vif {
} ap;
} u;
- bool is_started;
- bool is_up;
u32 aid;
- u8 bssid[ETH_ALEN];
- struct cfg80211_bitrate_mask bitrate_mask;
- int num_legacy_stations;
- int rtscts_prot_mode;
- int txpower;
- bool rsnie_present;
- bool wpaie_present;
- struct ieee80211_chanctx_conf chanctx;
u32 key_cipher;
u8 tx_encap_type;
- u8 vdev_stats_id;
- u32 punct_bitmap;
bool ps;
+
+ struct ath12k_link_vif deflink;
+ struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
+ struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS];
+ /* indicates bitmap of link vifs created in FW */
+ u16 links_map;
+ u8 last_scan_link;
+
+ /* Must be last - ends in a flexible-array member.
+ *
+ * FIXME: Driver should not copy struct ieee80211_chanctx_conf,
+ * especially because it has a flexible array. Find a better way.
+ */
+ struct ieee80211_chanctx_conf chanctx;
};
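struct ath12k_vif now splits into MLD-level state plus per-link state: deflink serves the non-MLO case, while link[] holds RCU-protected pointers and links_map records which slots have been created in firmware. A hedged sketch of the lookup pattern, to be run under rcu_read_lock(); sketch_get_link_vif() is illustrative only:

/* hypothetical lookup: resolve a link vif by link id */
static struct ath12k_link_vif *
sketch_get_link_vif(struct ath12k_vif *ahvif, u8 link_id)
{
	if (link_id >= ATH12K_NUM_MAX_LINKS ||
	    !(ahvif->links_map & BIT(link_id)))
		return NULL;

	return rcu_dereference(ahvif->link[link_id]);
}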
struct ath12k_vif_iter {
u32 vdev_id;
- struct ath12k_vif *arvif;
+ struct ath12k *ar;
+ struct ath12k_link_vif *arvif;
};
#define HAL_AST_IDX_INVALID 0xFFFF
@@ -306,10 +381,6 @@ struct ath12k_rx_peer_stats {
u64 non_ampdu_msdu_count;
u64 stbc_count;
u64 beamformed_count;
- u64 mcs_count[HAL_RX_MAX_MCS + 1];
- u64 nss_count[HAL_RX_MAX_NSS];
- u64 bw_count[HAL_RX_BW_MAX];
- u64 gi_count[HAL_RX_GI_MAX];
u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
u64 tid_count[IEEE80211_NUM_TIDS + 1];
u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
@@ -406,39 +477,63 @@ struct ath12k_wbm_tx_stats {
u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
};
-struct ath12k_sta {
- struct ath12k_vif *arvif;
+struct ath12k_link_sta {
+ struct ath12k_link_vif *arvif;
+ struct ath12k_sta *ahsta;
+
+ /* link address similar to ieee80211_link_sta */
+ u8 addr[ETH_ALEN];
/* the following are protected by ar->data_lock */
u32 changed; /* IEEE80211_RC_* */
u32 bw;
u32 nss;
u32 smps;
- enum hal_pn_type pn_type;
- struct work_struct update_wk;
+ struct wiphy_work update_wk;
struct rate_info txrate;
struct rate_info last_txrate;
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
+ u8 link_id;
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
+
+ /* For now the assoc link will be considered primary */
+ bool is_assoc_link;
+
+ /* for firmware use only */
+ u8 link_idx;
+};
+
+struct ath12k_sta {
+ struct ath12k_vif *ahvif;
+ enum hal_pn_type pn_type;
+ struct ath12k_link_sta deflink;
+ struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ /* indicates bitmap of link stas created in FW */
+ u16 links_map;
+ u8 assoc_link_id;
+ u16 ml_peer_id;
+ u8 num_peer;
+
+ enum ieee80211_sta_state state;
};
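struct ath12k_sta follows the same deflink/link[]/links_map split for stations, with ml_peer_id tying the links to a single MLO peer. A hedged sketch of visiting every firmware-created link station under rcu_read_lock(); sketch_for_each_link_sta() is illustrative only:

/* hypothetical: walk each link station flagged in links_map */
static void sketch_for_each_link_sta(struct ath12k_sta *ahsta)
{
	unsigned long links_map = ahsta->links_map;
	struct ath12k_link_sta *arsta;
	u8 link_id;

	for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
		arsta = rcu_dereference(ahsta->link[link_id]);
		if (!arsta)
			continue;
		/* per-link work, e.g. sum arsta->rx_duration */
	}
}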
#define ATH12K_MIN_5G_FREQ 4150
#define ATH12K_MIN_6G_FREQ 5925
#define ATH12K_MAX_6G_FREQ 7115
-#define ATH12K_NUM_CHANS 100
+#define ATH12K_NUM_CHANS 101
#define ATH12K_MAX_5G_CHAN 173
-enum ath12k_state {
- ATH12K_STATE_OFF,
- ATH12K_STATE_ON,
- ATH12K_STATE_RESTARTING,
- ATH12K_STATE_RESTARTED,
- ATH12K_STATE_WEDGED,
+enum ath12k_hw_state {
+ ATH12K_HW_STATE_OFF,
+ ATH12K_HW_STATE_ON,
+ ATH12K_HW_STATE_RESTARTING,
+ ATH12K_HW_STATE_RESTARTED,
+ ATH12K_HW_STATE_WEDGED,
/* Add other states as required */
};
@@ -453,6 +548,19 @@ struct ath12k_fw_stats {
struct list_head bcn;
};
+struct ath12k_dbg_htt_stats {
+ enum ath12k_dbg_htt_ext_stats_type type;
+ u32 cfg_param[4];
+ u8 reset;
+ struct debug_htt_stats_req *stats_req;
+};
+
+struct ath12k_debug {
+ struct dentry *debugfs_pdev;
+ struct dentry *debugfs_pdev_symlink;
+ struct ath12k_dbg_htt_stats htt_stats;
+};
+
struct ath12k_per_peer_tx_stats {
u32 succ_bytes;
u32 retry_bytes;
@@ -483,7 +591,6 @@ struct ath12k {
u32 ht_cap_info;
u32 vht_cap_info;
struct ath12k_he ar_he;
- enum ath12k_state state;
bool supports_6ghz;
struct {
struct completion started;
@@ -492,9 +599,10 @@ struct ath12k {
struct delayed_work timeout;
enum ath12k_scan_state state;
bool is_roc;
- int vdev_id;
int roc_freq;
bool roc_notify;
+ struct wiphy_work vdev_clean_wk;
+ struct ath12k_link_vif *arvif;
} scan;
struct {
@@ -505,7 +613,6 @@ struct ath12k {
unsigned long dev_flags;
unsigned int filter_flags;
- unsigned long monitor_flags;
u32 min_tx_power;
u32 max_tx_power;
u32 txpower_limit_2g;
@@ -515,13 +622,9 @@ struct ath12k {
u32 chan_tx_pwr;
u32 num_stations;
u32 max_num_stations;
- bool monitor_present;
- /* To synchronize concurrent synchronous mac80211 callback operations,
- * concurrent debugfs configuration and concurrent FW statistics events.
- */
- struct mutex conf_mutex;
+
/* protects the radio specific data like debug stats, ppdu_stats_info stats,
- * vdev_stop_status info, scan data, ath12k_sta info, ath12k_vif info,
+ * vdev_stop_status info, scan data, ath12k_sta info, ath12k_link_vif info,
* channel context data, survey info, test mode data.
*/
spinlock_t data_lock;
@@ -582,9 +685,12 @@ struct ath12k {
struct work_struct regd_update_work;
- struct work_struct wmi_mgmt_tx_work;
+ struct wiphy_work wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
+ struct ath12k_wow wow;
+ struct completion target_suspend;
+ bool target_suspend_ack;
struct ath12k_per_peer_tx_stats peer_tx_stats;
struct list_head ppdu_stats_info;
u32 ppdu_stat_list_depth;
@@ -592,18 +698,44 @@ struct ath12k {
struct ath12k_per_peer_tx_stats cached_stats;
u32 last_ppdu_id;
u32 cached_ppdu_id;
+#ifdef CONFIG_ATH12K_DEBUGFS
+ struct ath12k_debug debug;
+#endif
bool dfs_block_radar_events;
bool monitor_conf_enabled;
bool monitor_vdev_created;
bool monitor_started;
int monitor_vdev_id;
+
+ struct wiphy_radio_freq_range freq_range;
+
+ bool nlo_enabled;
+
+ struct completion mlo_setup_done;
+ u32 mlo_setup_status;
};
struct ath12k_hw {
struct ieee80211_hw *hw;
+ struct device *dev;
+
+ /* Protects writes to the hardware state ath12k_hw::state across
+ * hardware start<=>reconfigure<=>stop transitions.
+ */
+ struct mutex hw_mutex;
+ enum ath12k_hw_state state;
+ bool regd_updated;
+ bool use_6ghz_regd;
u8 num_radio;
+
+ DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS);
+
+ /* protected by wiphy_lock() */
+ struct list_head ml_peers;
+
+ /* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
};
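Since struct ath12k_hw ends in a flexible radio[] array, the per-radio struct ath12k instances live in the same allocation as the mac80211 private data. A minimal sizing sketch, assuming the hw is allocated via ieee80211_alloc_hw(); sketch_alloc_ah() is hypothetical and the driver's actual allocation path may differ:

#include <net/mac80211.h>

/* hypothetical: reserve the header plus num_radio trailing radios */
static struct ieee80211_hw *sketch_alloc_ah(const struct ieee80211_ops *ops,
					    u8 num_radio)
{
	size_t priv_len = sizeof(struct ath12k_hw) +
			  num_radio * sizeof(struct ath12k);

	return ieee80211_alloc_hw(priv_len, ops);
}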
@@ -637,6 +769,8 @@ struct ath12k_pdev_cap {
u32 tx_chain_mask_shift;
u32 rx_chain_mask_shift;
struct ath12k_band_cap band[NUM_NL80211_BANDS];
+ u32 eml_cap;
+ u32 mld_cap;
};
struct mlo_timestamp {
@@ -653,6 +787,7 @@ struct mlo_timestamp {
struct ath12k_pdev {
struct ath12k *ar;
u32 pdev_id;
+ u32 hw_link_id;
struct ath12k_pdev_cap cap;
u8 mac_addr[ETH_ALEN];
struct mlo_timestamp timestamp;
@@ -688,6 +823,53 @@ struct ath12k_soc_dp_stats {
struct ath12k_soc_dp_tx_err_stats tx_err;
};
+struct ath12k_mlo_memory {
+ struct target_mem_chunk chunk[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ int mlo_mem_size;
+ bool init_done;
+};
+
+struct ath12k_hw_link {
+ u8 device_id;
+ u8 pdev_idx;
+};
+
+/* Holds info on the group of devices that are registered as a single
+ * wiphy, protected with struct ath12k_hw_group::mutex.
+ */
+struct ath12k_hw_group {
+ struct list_head list;
+ u8 id;
+ u8 num_devices;
+ u8 num_probed;
+ u8 num_started;
+ unsigned long flags;
+ struct ath12k_base *ab[ATH12K_MAX_SOCS];
+
+ /* protects access to this struct */
+ struct mutex mutex;
+
+ /* Holds information about wiphy (hw) registration.
+ *
+ * In the Multi/Single Link Operation case, all pdevs are registered as
+ * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
+ * registered as a separate wiphy.
+ */
+ struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
+ u8 num_hw;
+ bool mlo_capable;
+ struct device_node *wsi_node[ATH12K_MAX_SOCS];
+ struct ath12k_mlo_memory mlo_mem;
+ struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
+ bool hw_link_id_init_done;
+};
+
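Within struct ath12k_hw_group, num_probed counts the devices that have attached so far while num_devices is the expected WSI group size; comparing them under ag->mutex is what gates group start. A hedged sketch of that check; sketch_group_ready() is illustrative only:

/* hypothetical: the group starts only once all devices have probed */
static bool sketch_group_ready(struct ath12k_hw_group *ag)
{
	lockdep_assert_held(&ag->mutex);

	return ag->num_probed == ag->num_devices;
}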
+/* Holds WSI info specific to each device, excluding WSI group info */
+struct ath12k_wsi_info {
+ u32 index;
+ u32 hw_link_id_base;
+};
+
/* Master structure to hold the hw data which may be used in core module */
struct ath12k_base {
enum ath12k_hw_rev hw_rev;
@@ -696,10 +878,15 @@ struct ath12k_base {
struct ath12k_qmi qmi;
struct ath12k_wmi_base wmi_ab;
struct completion fw_ready;
+ u8 device_id;
int num_radios;
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
+ struct ath12k_dump_file_data *dump_data;
+ size_t ath12k_coredump_len;
+ struct work_struct dump_work;
+
struct ath12k_htc htc;
struct ath12k_dp dp;
@@ -712,6 +899,11 @@ struct ath12k_base {
const struct ath12k_hif_ops *ops;
} hif;
+ struct {
+ struct completion wakeup_completed;
+ u32 wmi_conf_rx_decap_mode;
+ } wow;
+
struct ath12k_ce ce;
struct timer_list rx_replenish_retry;
struct ath12k_hal hal;
@@ -741,15 +933,6 @@ struct ath12k_base {
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
- /* Holds information of wiphy (hw) registration.
- *
- * In Multi/Single Link Operation case, all pdevs are registered as
- * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
- * registered as separate wiphys.
- */
- struct ath12k_hw *ah[MAX_RADIOS];
- u8 num_hw;
-
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@@ -782,6 +965,9 @@ struct ath12k_base {
/* Current DFS Regulatory */
enum ath12k_dfs_region dfs_region;
struct ath12k_soc_dp_stats soc_stats;
+#ifdef CONFIG_ATH12K_DEBUGFS
+ struct dentry *debugfs_soc;
+#endif
unsigned long dev_flags;
struct completion driver_recovery;
@@ -791,11 +977,8 @@ struct ath12k_base {
struct work_struct reset_work;
atomic_t reset_count;
atomic_t recovery_count;
- atomic_t recovery_start_count;
bool is_reset;
struct completion reset_complete;
- struct completion reconfigure_complete;
- struct completion recovery_start;
/* continuous recovery fail count */
atomic_t fail_cont_count;
unsigned long reset_fail_timeout;
@@ -843,10 +1026,32 @@ struct ath12k_base {
const struct hal_rx_ops *hal_rx_ops;
- /* slo_capable denotes if the single/multi link operation
- * is supported within the same chip (SoC).
- */
- bool slo_capable;
+ /* Denotes whether MLO is possible within the chip */
+ bool single_chip_mlo_supp;
+
+ struct completion restart_completed;
+
+#ifdef CONFIG_ACPI
+
+ struct {
+ bool started;
+ u32 func_bit;
+ bool acpi_tas_enable;
+ bool acpi_bios_sar_enable;
+ u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE];
+ u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE];
+ u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE];
+ u8 geo_offset_data[ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE];
+ u8 cca_data[ATH12K_ACPI_DSM_CCA_DATA_SIZE];
+ u8 band_edge_power[ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE];
+ } acpi;
+
+#endif /* CONFIG_ACPI */
+
+ struct notifier_block panic_nb;
+
+ struct ath12k_hw_group *ag;
+ struct ath12k_wsi_info wsi_info;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
@@ -874,8 +1079,11 @@ int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd
int ath12k_core_check_dt(struct ath12k_base *ath12k);
int ath12k_core_check_smbios(struct ath12k_base *ab);
void ath12k_core_halt(struct ath12k *ar);
+int ath12k_core_resume_early(struct ath12k_base *ab);
int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
+int ath12k_core_suspend_late(struct ath12k_base *ab);
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
@@ -883,6 +1091,8 @@ u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag);
+
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
switch (state) {
@@ -912,16 +1122,26 @@ static inline struct ath12k_skb_rxcb *ATH12K_SKB_RXCB(struct sk_buff *skb)
return (struct ath12k_skb_rxcb *)skb->cb;
}
-static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif)
+static inline struct ath12k_vif *ath12k_vif_to_ahvif(struct ieee80211_vif *vif)
{
return (struct ath12k_vif *)vif->drv_priv;
}
-static inline struct ath12k_sta *ath12k_sta_to_arsta(struct ieee80211_sta *sta)
+static inline struct ath12k_sta *ath12k_sta_to_ahsta(struct ieee80211_sta *sta)
{
return (struct ath12k_sta *)sta->drv_priv;
}
+static inline struct ieee80211_sta *ath12k_ahsta_to_sta(struct ath12k_sta *ahsta)
+{
+ return container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+}
+
+static inline struct ieee80211_vif *ath12k_ahvif_to_vif(struct ath12k_vif *ahvif)
+{
+ return container_of((void *)ahvif, struct ieee80211_vif, drv_priv);
+}
+
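The converters above rely on drv_priv being the trailing private area of the mac80211 objects, so the pointer-to-container and container-to-pointer conversions are exact inverses. A small sanity sketch; sketch_sta_roundtrip() is hypothetical:

/* hypothetical: the drv_priv round-trip is the identity */
static bool sketch_sta_roundtrip(struct ieee80211_sta *sta)
{
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);

	return ath12k_ahsta_to_sta(ahsta) == sta;
}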
static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab,
int mac_id)
{
@@ -951,13 +1171,63 @@ static inline struct ath12k_hw *ath12k_hw_to_ah(struct ieee80211_hw *hw)
return hw->priv;
}
-static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah)
+static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah, u8 hw_link_id)
+{
+ if (WARN(hw_link_id >= ah->num_radio,
+ "bad hw link id %d, so switch to default link\n", hw_link_id))
+ hw_link_id = 0;
+
+ return &ah->radio[hw_link_id];
+}
+
+static inline struct ath12k_hw *ath12k_ar_to_ah(struct ath12k *ar)
{
- return ah->radio;
+ return ar->ah;
}
static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
{
return ar->ah->hw;
}
+
+#define for_each_ar(ah, ar, index) \
+ for ((index) = 0; ((index) < (ah)->num_radio && \
+ ((ar) = &(ah)->radio[(index)])); (index)++)
+
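for_each_ar() hides the radio[] indexing; the assignment embedded in the loop condition both bounds the index and sets ar. A small usage sketch summing a per-radio field declared earlier in this header; sketch_total_stations() is hypothetical:

/* hypothetical: aggregate a per-radio counter across the hw */
static u32 sketch_total_stations(struct ath12k_hw *ah)
{
	struct ath12k *ar;
	u32 total = 0;
	int i;

	for_each_ar(ah, ar, i)
		total += ar->num_stations;

	return total;
}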
+static inline struct ath12k_hw *ath12k_ag_to_ah(struct ath12k_hw_group *ag, int idx)
+{
+ return ag->ah[idx];
+}
+
+static inline void ath12k_ag_set_ah(struct ath12k_hw_group *ag, int idx,
+ struct ath12k_hw *ah)
+{
+ ag->ah[idx] = ah;
+}
+
+static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab)
+{
+ return ab->ag;
+}
+
+static inline void ath12k_core_started(struct ath12k_base *ab)
+{
+ lockdep_assert_held(&ab->ag->mutex);
+
+ ab->ag->num_started++;
+}
+
+static inline void ath12k_core_stopped(struct ath12k_base *ab)
+{
+ lockdep_assert_held(&ab->ag->mutex);
+
+ ab->ag->num_started--;
+}
+
+static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag,
+ u8 device_id)
+{
+ return ag->ab[device_id];
+}
+
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/coredump.c b/drivers/net/wireless/ath/ath12k/coredump.c
new file mode 100644
index 000000000000..ce1beeb54836
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/coredump.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/devcoredump.h>
+#include "hif.h"
+#include "coredump.h"
+#include "debug.h"
+
+enum ath12k_fw_crash_dump_type
+ath12k_coredump_get_dump_type(enum ath12k_qmi_target_mem type)
+{
+ enum ath12k_fw_crash_dump_type dump_type;
+
+ switch (type) {
+ case HOST_DDR_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ break;
+ case M3_DUMP_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_M3_DUMP;
+ break;
+ case PAGEABLE_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_PAGEABLE_DATA;
+ break;
+ case BDF_MEM_REGION_TYPE:
+ case CALDB_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_NONE;
+ break;
+ case MLO_GLOBAL_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_MLO_GLOBAL_DATA;
+ break;
+ default:
+ dump_type = FW_CRASH_DUMP_TYPE_MAX;
+ break;
+ }
+
+ return dump_type;
+}
+
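On the caller side, only regions mapping to a real dump type get copied into the coredump: BDF and CALDB regions come back as FW_CRASH_DUMP_NONE and unexpected region types as FW_CRASH_DUMP_TYPE_MAX. A hedged sketch of that filtering; sketch_region_wanted() is illustrative only:

/* hypothetical filter: skip regions that should not land in the dump */
static bool sketch_region_wanted(struct ath12k_base *ab,
				 enum ath12k_qmi_target_mem type)
{
	enum ath12k_fw_crash_dump_type dump_type;

	dump_type = ath12k_coredump_get_dump_type(type);
	if (dump_type == FW_CRASH_DUMP_TYPE_MAX)
		ath12k_warn(ab, "unknown mem region type %d\n", type);

	return dump_type != FW_CRASH_DUMP_NONE &&
	       dump_type != FW_CRASH_DUMP_TYPE_MAX;
}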
+void ath12k_coredump_upload(struct work_struct *work)
+{
+ struct ath12k_base *ab = container_of(work, struct ath12k_base, dump_work);
+
+ ath12k_info(ab, "Uploading coredump\n");
+ /* dev_coredumpv() takes ownership of the buffer */
+ dev_coredumpv(ab->dev, ab->dump_data, ab->ath12k_coredump_len, GFP_KERNEL);
+ ab->dump_data = NULL;
+}
+
+void ath12k_coredump_collect(struct ath12k_base *ab)
+{
+ ath12k_hif_coredump_download(ab);
+}
diff --git a/drivers/net/wireless/ath/ath12k/coredump.h b/drivers/net/wireless/ath/ath12k/coredump.h
new file mode 100644
index 000000000000..13f46a605113
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/coredump.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _ATH12K_COREDUMP_H_
+#define _ATH12K_COREDUMP_H_
+
+#define ATH12K_FW_CRASH_DUMP_V2 2
+
+enum ath12k_fw_crash_dump_type {
+ FW_CRASH_DUMP_PAGING_DATA,
+ FW_CRASH_DUMP_RDDM_DATA,
+ FW_CRASH_DUMP_REMOTE_MEM_DATA,
+ FW_CRASH_DUMP_PAGEABLE_DATA,
+ FW_CRASH_DUMP_M3_DUMP,
+ FW_CRASH_DUMP_NONE,
+ FW_CRASH_DUMP_MLO_GLOBAL_DATA,
+
+ /* keep last */
+ FW_CRASH_DUMP_TYPE_MAX,
+};
+
+#define COREDUMP_TLV_HDR_SIZE 8
+
+struct ath12k_tlv_dump_data {
+ /* see ath12k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath12k_dump_file_data {
+ /* "ATH12K-FW-DUMP" */
+ char df_magic[16];
+ /* total dump len in bytes */
+ __le32 len;
+ /* file dump version */
+ __le32 version;
+ /* pci device id */
+ __le32 chip_id;
+ /* qrtr instance id */
+ __le32 qrtr_id;
+ /* pci domain id */
+ __le32 bus_id;
+ guid_t guid;
+ /* time-of-day stamp */
+ __le64 tv_sec;
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+ u8 data[];
+} __packed;
+
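The fixed header in front of the TLVs identifies the dump and timestamps it; every multi-byte field is little-endian on disk. A minimal sketch of filling it (sketch_fill_dump_header() is hypothetical; the real builder would also set chip_id/qrtr_id/bus_id from the PCI device):

#include <linux/timekeeping.h>
#include <linux/uuid.h>

/* hypothetical header fill: magic, version, fresh GUID, timestamp;
 * TLVs are appended behind data[] afterwards
 */
static void sketch_fill_dump_header(struct ath12k_dump_file_data *file_data,
				    size_t total_len)
{
	struct timespec64 ts;

	strscpy(file_data->df_magic, "ATH12K-FW-DUMP",
		sizeof(file_data->df_magic));
	file_data->len = cpu_to_le32(total_len);
	file_data->version = cpu_to_le32(ATH12K_FW_CRASH_DUMP_V2);
	guid_gen(&file_data->guid);
	ktime_get_real_ts64(&ts);
	file_data->tv_sec = cpu_to_le64(ts.tv_sec);
	file_data->tv_nsec = cpu_to_le64(ts.tv_nsec);
}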
+#ifdef CONFIG_ATH12K_COREDUMP
+enum ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type
+ (enum ath12k_qmi_target_mem type);
+void ath12k_coredump_upload(struct work_struct *work);
+void ath12k_coredump_collect(struct ath12k_base *ab);
+#else
+static inline enum ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type
+ (enum ath12k_qmi_target_mem type)
+{
+ return FW_CRASH_DUMP_TYPE_MAX;
+}
+
+static inline void ath12k_coredump_upload(struct work_struct *work)
+{
+}
+
+static inline void ath12k_coredump_collect(struct ath12k_base *ab)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
index fe5a732ba9ec..ff6eaeafa092 100644
--- a/drivers/net/wireless/ath/ath12k/debug.c
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -36,7 +36,7 @@ void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
va_end(args);
}
-void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
+void __ath12k_warn(struct device *dev, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -45,7 +45,7 @@ void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
- dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ dev_warn_ratelimited(dev, "%pV", &vaf);
/* TODO: Trace the log */
va_end(args);
}
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index aa685295f8a4..90e801136bc6 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUG_H_
@@ -25,12 +25,16 @@ enum ath12k_debug_mask {
ATH12K_DBG_PCI = 0x00001000,
ATH12K_DBG_DP_TX = 0x00002000,
ATH12K_DBG_DP_RX = 0x00004000,
+ ATH12K_DBG_WOW = 0x00008000,
ATH12K_DBG_ANY = 0xffffffff,
};
__printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...);
-__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...);
+__printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...);
+
+#define ath12k_warn(ab, fmt, ...) __ath12k_warn((ab)->dev, fmt, ##__VA_ARGS__)
+#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__)
extern unsigned int ath12k_debug_mask;
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
new file mode 100644
index 000000000000..d4b32d1a431c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "debugfs.h"
+#include "debugfs_htt_stats.h"
+
+static ssize_t ath12k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ int ret;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+ ret = ath12k_wmi_simulate_radar(ar);
+ if (ret)
+ goto exit;
+
+ ret = count;
+exit:
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+ return ret;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath12k_write_simulate_radar,
+ .open = simple_open
+};
+
+void ath12k_debugfs_soc_create(struct ath12k_base *ab)
+{
+ bool dput_needed;
+ char soc_name[64] = { 0 };
+ struct dentry *debugfs_ath12k;
+
+ debugfs_ath12k = debugfs_lookup("ath12k", NULL);
+ if (debugfs_ath12k) {
+ /* a dentry from lookup() needs dput() once it is no longer used */
+ dput_needed = true;
+ } else {
+ debugfs_ath12k = debugfs_create_dir("ath12k", NULL);
+ if (IS_ERR_OR_NULL(debugfs_ath12k))
+ return;
+ dput_needed = false;
+ }
+
+ scnprintf(soc_name, sizeof(soc_name), "%s-%s", ath12k_bus_str(ab->hif.bus),
+ dev_name(ab->dev));
+
+ ab->debugfs_soc = debugfs_create_dir(soc_name, debugfs_ath12k);
+
+ if (dput_needed)
+ dput(debugfs_ath12k);
+}
+
+void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+ /* The ath12k directory is left in place on purpose, even if it
+ * would be empty. This simplifies the directory handling, and it is
+ * only a minor cosmetic issue to leave an empty ath12k directory in
+ * debugfs.
+ */
+}
+
+void ath12k_debugfs_register(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->ah->hw;
+ char pdev_name[5];
+ char buf[100] = {0};
+
+ scnprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
+
+ ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+
+ /* Create a symlink under ieee80211/phy* */
+ scnprintf(buf, sizeof(buf), "../../ath12k/%pd2", ar->debug.debugfs_pdev);
+ ar->debug.debugfs_pdev_symlink = debugfs_create_symlink("ath12k",
+ hw->wiphy->debugfsdir,
+ buf);
+
+ if (ar->mac.sbands[NL80211_BAND_5GHZ].channels) {
+ debugfs_create_file("dfs_simulate_radar", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_simulate_radar);
+ }
+
+ ath12k_debugfs_htt_stats_register(ar);
+}
+
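Taken together, ath12k_debugfs_soc_create() and ath12k_debugfs_register() produce one directory per SoC, a "macN" directory per pdev beneath it, and a symlink back from the wiphy's own debugfs directory. An illustrative layout for a single hypothetical PCI device (actual names depend on dev_name() and pdev_idx):

/sys/kernel/debug/ath12k/pci-0000:06:00.0/mac0/dfs_simulate_radar
/sys/kernel/debug/ieee80211/phy0/ath12k -> ../../ath12k/pci-0000:06:00.0/mac0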
+void ath12k_debugfs_unregister(struct ath12k *ar)
+{
+ if (!ar->debug.debugfs_pdev)
+ return;
+
+ /* Remove symlink under ieee80211/phy* */
+ debugfs_remove(ar->debug.debugfs_pdev_symlink);
+ debugfs_remove_recursive(ar->debug.debugfs_pdev);
+ ar->debug.debugfs_pdev_symlink = NULL;
+ ar->debug.debugfs_pdev = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
new file mode 100644
index 000000000000..8d64ba03aa9a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_DEBUGFS_H_
+#define _ATH12K_DEBUGFS_H_
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+void ath12k_debugfs_soc_create(struct ath12k_base *ab);
+void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
+void ath12k_debugfs_register(struct ath12k *ar);
+void ath12k_debugfs_unregister(struct ath12k *ar);
+#else
+static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
+{
+}
+
+static inline void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+{
+}
+
+static inline void ath12k_debugfs_register(struct ath12k *ar)
+{
+}
+
+static inline void ath12k_debugfs_unregister(struct ath12k *ar)
+{
+}
+
+#endif /* CONFIG_ATH12K_DEBUGFS */
+
+#endif /* _ATH12K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
new file mode 100644
index 000000000000..41e4ef2ef3af
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -0,0 +1,4389 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+
+static u32
+print_array_to_buf_index(u8 *buf, u32 offset, const char *header, u32 stats_index,
+ const __le32 *array, u32 array_len, const char *footer)
+{
+ int index = 0;
+ u8 i;
+
+ if (header) {
+ index += scnprintf(buf + offset,
+ ATH12K_HTT_STATS_BUF_SIZE - offset,
+ "%s = ", header);
+ }
+ for (i = 0; i < array_len; i++) {
+ index += scnprintf(buf + offset + index,
+ (ATH12K_HTT_STATS_BUF_SIZE - offset) - index,
+ " %u:%u,", stats_index++, le32_to_cpu(array[i]));
+ }
+ /* To overwrite the last trailing comma */
+ index--;
+ *(buf + offset + index) = '\0';
+
+ if (footer) {
+ index += scnprintf(buf + offset + index,
+ (ATH12K_HTT_STATS_BUF_SIZE - offset) - index,
+ "%s", footer);
+ }
+ return index;
+}
+
+static u32
+print_array_to_buf(u8 *buf, u32 offset, const char *header,
+ const __le32 *array, u32 array_len, const char *footer)
+{
+ return print_array_to_buf_index(buf, offset, header, 0, array, array_len,
+ footer);
+}
+
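As a worked example of the contract these two helpers implement: for a two-element array {7, 9} with header "urrn_stats" and footer "\n\n", the buffer receives "urrn_stats =  0:7, 1:9\n\n" (note the double space, since each element is emitted as " index:value,"). The index-- above backs up over the final trailing comma, and the footer then overwrites the NUL terminator that replaced it.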
+static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size)
+{
+ switch (ru_size) {
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26:
+ return "26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52:
+ return "52";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106:
+ return "106";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242:
+ return "242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484:
+ return "484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996:
+ return "996";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2:
+ return "996x2";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
+{
+ switch (ru_size) {
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26:
+ return "26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52:
+ return "52";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52_26:
+ return "52+26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106:
+ return "106";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106_26:
+ return "106+26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_242:
+ return "242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484:
+ return "484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484_242:
+ return "484+242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996:
+ return "996";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484:
+ return "996+484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484_242:
+ return "996+484+242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2:
+ return "996x2";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2_484:
+ return "996x2+484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3:
+ return "996x3";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3_484:
+ return "996x3+484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x4:
+ return "996x4";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *
+ath12k_tx_ru_size_to_str(enum ath12k_htt_stats_ru_type ru_type, u8 ru_size)
+{
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ return ath12k_htt_ax_tx_rx_ru_size_to_str(ru_size);
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ return ath12k_htt_be_tx_rx_ru_size_to_str(ru_size);
+ else
+ return "unknown";
+}
+
+static void
+htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "comp_delivered = %u\n",
+ le32_to_cpu(htt_stats_buf->comp_delivered));
+ len += scnprintf(buf + len, buf_len - len, "self_triggers = %u\n",
+ le32_to_cpu(htt_stats_buf->self_triggers));
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_queued));
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_reaped));
+ len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+ le32_to_cpu(htt_stats_buf->underrun));
+ len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_paused));
+ len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_flush));
+ len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_filt));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_abort));
+ len += scnprintf(buf + len, buf_len - len, "ppdu_ok = %u\n",
+ le32_to_cpu(htt_stats_buf->ppdu_ok));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_requed));
+ len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_xretry));
+ len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
+ le32_to_cpu(htt_stats_buf->data_rc));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_dropped_xretry));
+ len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
+ le32_to_cpu(htt_stats_buf->illgl_rate_phy_err));
+ len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->cont_xretry));
+ len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_timeout));
+ len += scnprintf(buf + len, buf_len - len, "tx_time_dur_data = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_time_dur_data));
+ len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_resets));
+ len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_underrun));
+ len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
+ le32_to_cpu(htt_stats_buf->txop_ovf));
+ len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_failed_queueing));
+ len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_completed));
+ len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_restarted));
+ len += scnprintf(buf + len, buf_len - len, "seq_txop_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_txop_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "next_seq_cancel = %u\n",
+ le32_to_cpu(htt_stats_buf->next_seq_cancel));
+ len += scnprintf(buf + len, buf_len - len, "dl_mu_mimo_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "dl_mu_ofdma_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_ofdma_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "ul_mu_mimo_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "ul_mu_ofdma_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_peer_blacklisted = %u\n",
+ le32_to_cpu(htt_stats_buf->num_mu_peer_blacklisted));
+ len += scnprintf(buf + len, buf_len - len, "seq_qdepth_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_qdepth_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "seq_min_msdu_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_min_msdu_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "mu_seq_min_msdu_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_seq_min_msdu_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_switch_hw_paused));
+ len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+ le32_to_cpu(htt_stats_buf->next_seq_posted_dsr));
+ len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_posted_isr));
+ len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_ctrl_cached));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_count_tqm));
+ len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_count_tqm));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_removed_tqm));
+ len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_removed_tqm));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdus_max_retries = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdus_max_retries));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_sw_flush));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_hw_filter));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_truncated));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_expired));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_seq_hw_retry));
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_tlv_proc));
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
+ le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt_valid));
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+ le32_to_cpu(htt_stats_buf->num_total_ppdus_tried_ota));
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+ le32_to_cpu(htt_stats_buf->num_data_ppdus_tried_ota));
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
+ le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_enqued));
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
+ le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_freed));
+ len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
+ le32_to_cpu(htt_stats_buf->local_data_enqued));
+ len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
+ le32_to_cpu(htt_stats_buf->local_data_freed));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_tried));
+ len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->isr_wait_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_active_dur_us_low));
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_active_dur_us_high));
+ len += scnprintf(buf + len, buf_len - len, "fes_offsets_err_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->fes_offsets_err_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_urrn_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_urrn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_URRN_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_URRN_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "urrn_stats", htt_stats_buf->urrn_stats,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_flush_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_flush_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_FLUSH_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "flush_errs", htt_stats_buf->flush_errs,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_sifs_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sifs_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "sifs_status", htt_stats_buf->sifs_status,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv *htt_stats_buf = tag_buf;
+ char *mode;
+ u8 j, hw_mode, i, str_buf_len;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 stats_value;
+ u8 max_ppdu = ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST;
+ u8 max_sched = ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN];
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ hw_mode = le32_to_cpu(htt_stats_buf->hw_mode);
+
+ switch (hw_mode) {
+ case ATH12K_HTT_STATS_HWMODE_AC:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AC_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "ac";
+ break;
+ case ATH12K_HTT_STATS_HWMODE_AX:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "ax";
+ break;
+ case ATH12K_HTT_STATS_HWMODE_BE:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_BE_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "be";
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < ATH12K_HTT_STATS_NUM_NR_BINS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_seq_posted_nr%u = %u\n", mode,
+ ((i + 1) * 4),
+ le32_to_cpu(htt_stats_buf->num_seq_posted[i]));
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_posted_per_burst
+ [i * max_ppdu + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_ppdu_posted_per_burst_nr%u = %s\n",
+ mode, ((i + 1) * 4), str_buf);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_cmpl_per_burst
+ [i * max_ppdu + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_ppdu_completed_per_burst_nr%u = %s\n",
+ mode, ((i + 1) * 4), str_buf);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_seq_term_status
+ [i * max_sched + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_seq_term_status_nr%u = %s\n\n",
+ mode, ((i + 1) * 4), str_buf);
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_sifs_hist_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "sifs_hist_status",
+ htt_stats_buf->sifs_hist_status, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_pdev_ctrl_path_tx_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_CTRL_PATH_TX_STATS:\n");
+ len += print_array_to_buf(buf, len, "fw_tx_mgmt_subtype",
+ htt_stats_buf->fw_tx_mgmt_subtype,
+ ATH12K_HTT_STATS_SUBTYPE_MAX, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
+ le32_to_cpu(htt_stats_buf->current_timestamp));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word,
+ ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "txq_id = %u\n",
+ u32_get_bits(mac_id_word,
+ ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID));
+ len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_policy));
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_sched_cmd_posted_timestamp));
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_sched_cmd_compl_timestamp));
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_2_tac_lwm_count));
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_2_tac_ring_full));
+ len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_cmd_post_failure));
+ len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->num_active_tids));
+ len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
+ le32_to_cpu(htt_stats_buf->num_ps_schedules));
+ len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_cmds_pending));
+ len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tid_register));
+ len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tid_unregister));
+ len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_qstats_queried));
+ len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
+ le32_to_cpu(htt_stats_buf->qstats_update_pending));
+ len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_qstats_query_timestamp));
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tqm_cmdq_full));
+ len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_de_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_rt_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tqm_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_sched));
+ len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
+ le32_to_cpu(htt_stats_buf->dur_based_sendn_term));
+ len += scnprintf(buf + len, buf_len - len, "su_notify2_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_notify2_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_optimal_queued_msdus_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_optimal_queued_msdus_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_delay_timeout_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_delay_timeout_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_min_txtime_sched_delay = %u\n",
+ le32_to_cpu(htt_stats_buf->su_min_txtime_sched_delay));
+ len += scnprintf(buf + len, buf_len - len, "su_no_delay = %u\n",
+ le32_to_cpu(htt_stats_buf->su_no_delay));
+ len += scnprintf(buf + len, buf_len - len, "num_supercycles = %u\n",
+ le32_to_cpu(htt_stats_buf->num_supercycles));
+ len += scnprintf(buf + len, buf_len - len, "num_subcycles_with_sort = %u\n",
+ le32_to_cpu(htt_stats_buf->num_subcycles_with_sort));
+ len += scnprintf(buf + len, buf_len - len, "num_subcycles_no_sort = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_subcycles_no_sort));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_cmd_posted_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_cmd_posted_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_cmd_posted",
+ htt_stats_buf->sched_cmd_posted, num_elements, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_cmd_reaped_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_cmd_reaped_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_cmd_reaped",
+ htt_stats_buf->sched_cmd_reaped, num_elements, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_sched_order_su_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_sched_order_su_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 sched_order_su_num_entries = min_t(u32, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_order_su",
+ htt_stats_buf->sched_order_su,
+ sched_order_su_num_entries, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_sched_ineligibility_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_sched_ineligibility_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 sched_ineligibility_num_entries = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_INELIGIBILITY:\n");
+ len += print_array_to_buf(buf, len, "sched_ineligibility",
+ htt_stats_buf->sched_ineligibility,
+ sched_ineligibility_num_entries, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_supercycle_trigger_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_supercycle_triggers_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER:\n");
+ len += print_array_to_buf(buf, len, "supercycle_triggers",
+ htt_stats_buf->supercycle_triggers, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_pdev_errs_tlv *htt_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ le32_to_cpu(htt_buf->tx_abort));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
+ le32_to_cpu(htt_buf->tx_abort_fail_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
+ le32_to_cpu(htt_buf->rx_abort));
+ len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
+ le32_to_cpu(htt_buf->rx_abort_fail_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+ le32_to_cpu(htt_buf->rx_flush_cnt));
+ len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
+ le32_to_cpu(htt_buf->warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
+ le32_to_cpu(htt_buf->cold_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_cold_reset_restore_cal = %u\n",
+ le32_to_cpu(htt_buf->mac_cold_reset_restore_cal));
+ len += scnprintf(buf + len, buf_len - len, "mac_cold_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_cold_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_warm_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_only_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_only_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_ucode_trig = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_ucode_trig));
+ len += scnprintf(buf + len, buf_len - len, "mac_warm_reset_restore_cal = %u\n",
+ le32_to_cpu(htt_buf->mac_warm_reset_restore_cal));
+ len += scnprintf(buf + len, buf_len - len, "mac_sfm_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_sfm_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_m3_ssr = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_m3_ssr));
+ len += scnprintf(buf + len, buf_len - len, "fw_rx_rings_reset = %u\n",
+ le32_to_cpu(htt_buf->fw_rx_rings_reset));
+ len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+ le32_to_cpu(htt_buf->tx_flush));
+ len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
+ le32_to_cpu(htt_buf->tx_glb_reset));
+ len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
+ le32_to_cpu(htt_buf->tx_txq_reset));
+ len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n",
+ le32_to_cpu(htt_buf->rx_timeout_reset));
+
+ len += scnprintf(buf + len, buf_len - len, "PDEV_PHY_WARM_RESET_REASONS:\n");
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_reason_phy_m3 = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_phy_m3));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_hw_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hw_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_num_cca_rx_frame_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_num_rx_frame_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_wal_rx_recovery_rst_rx_busy = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_rx_busy));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_wal_rx_recovery_rst_mac_hang = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_mac_hng));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_mac_reset_converted_phy_reset = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_mac_conv_phy_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_lifetime_expiry_cca_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_exp_cca_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_consecutive_flush9_war = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_consec_flsh_war));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_hwsch_reset_war = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hwsch_reset_war));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_hwsch_wdog_or_cca_wdog_war = %u\n\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_hwsch_cca_wdog_war));
+
+ len += scnprintf(buf + len, buf_len - len, "WAL_RX_RECOVERY_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_mac_hang_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_mac_hang_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_known_sig_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_known_sig_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_no_rx_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_no_rx_consecutive_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_consec_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_rx_busy_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_rx_busy_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_phy_mac_hang_count = %u\n\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_phy_mac_hang_cnt));
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_DEST_DRAIN_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_descs_leak_prevention_done = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_leak_prevented));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_descs_saved_cnt = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_saved_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2reo_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2reo_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2fw_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2fw_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2wbm_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2wbm_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma1_2sw_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma1_2sw_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_drain_ok_mac_idle = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_drain_ok_mac_idle));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_ok_mac_not_idle = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_ok_mac_not_idle));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_prerequisite_invld = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_prerequisite_invld));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_skip_for_non_lmac_reset = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_skip_non_lmac_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_hw_fifo_not_empty_post_drain_wait = %u\n\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_hw_fifo_notempty_post_wait));
+
+ stats_req->buf_len = len;
+}
+
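+/*
+ * The fixed-size TLV printers below all follow the same pattern: check
+ * that the firmware-reported tag_len covers the TLV struct, append each
+ * field with scnprintf() bounded by ATH12K_HTT_STATS_BUF_SIZE
+ * (scnprintf() never writes past the limit and returns the bytes
+ * actually added), and publish the new length through stats_req->buf_len.
+ */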
+static void
+ath12k_htt_print_hw_stats_intr_misc_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n",
+ htt_stats_buf->hw_intr_name);
+ len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
+ le32_to_cpu(htt_stats_buf->mask));
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ le32_to_cpu(htt_stats_buf->count));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_whal_tx_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n",
+ le32_to_cpu(htt_stats_buf->last_unpause_ppdu_id));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_unpause_wait_tqm_write));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_dummy_tlv_skipped));
+ len += scnprintf(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_misaligned_offset_received));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_reset_count));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_dev_reset_war));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_delayed_pause));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_long_delayed_pause));
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n",
+ le32_to_cpu(htt_stats_buf->sch_rx_ppdu_no_response));
+ len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n",
+ le32_to_cpu(htt_stats_buf->sch_selfgen_response));
+	len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger = %u\n\n",
+ le32_to_cpu(htt_stats_buf->sch_rx_sifs_resp_trigger));
+
+ stats_req->buf_len = len;
+}
+
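+/*
+ * The HW WAR TLV carries a fixed header followed by a variable run of
+ * u32 counters: subtracting the header size from tag_len and shifting
+ * right by two gives the number of trailing hw_wars[] words to dump.
+ */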
+static void
+ath12k_htt_print_hw_war_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_war_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 fixed_len, array_len;
+ u8 i, array_words;
+ u32 mac_id;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+ fixed_len = sizeof(*htt_stats_buf);
+ array_len = tag_len - fixed_len;
+ array_words = array_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_WAR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+
+	for (i = 0; i < array_words; i++) {
+		len += scnprintf(buf + len, buf_len - len, "hw_war %u = %u\n",
+				 i, le32_to_cpu(htt_stats_buf->hw_wars[i]));
+	}
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
+ le32_to_cpu(htt_stats_buf->max_cmdq_id));
+ len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
+ le32_to_cpu(htt_stats_buf->list_mpdu_cnt_hist_intvl));
+ len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->add_msdu));
+ len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
+ le32_to_cpu(htt_stats_buf->q_empty));
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
+ le32_to_cpu(htt_stats_buf->q_not_empty));
+ len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
+ le32_to_cpu(htt_stats_buf->drop_notification));
+ len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n",
+ le32_to_cpu(htt_stats_buf->desc_threshold));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_tqm_invalid_status = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_tqm_invalid_status));
+ len += scnprintf(buf + len, buf_len - len, "missed_tqm_gen_mpdus = %u\n",
+ le32_to_cpu(htt_stats_buf->missed_tqm_gen_mpdus));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_timestamp_updates));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates_by_get_mpdu_head_info_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_updates_mpdu_head_info_cmd));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates_by_emp_to_nonemp_status = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_updates_emp_to_nonemp_status));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_get_mpdu_head_info_cmds_by_sched_algo_la_query = %u\n",
+ le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_query));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_get_mpdu_head_info_cmds_by_tac = %u\n",
+ le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_tac));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_gen_mpdu_cmds_by_sched_algo_la_query = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_mpdu_cmds_by_query));
+ len += scnprintf(buf + len, buf_len - len, "active_tqm_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_active_tids));
+ len += scnprintf(buf + len, buf_len - len, "inactive_tqm_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_inactive_tids));
+ len += scnprintf(buf + len, buf_len - len, "tqm_active_msduq_flows = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_active_msduq_flows));
+ len += scnprintf(buf + len, buf_len - len, "hi_prio_q_not_empty = %u\n\n",
+ le32_to_cpu(htt_stats_buf->high_prio_q_not_empty));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_error_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->q_empty_failure));
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->q_not_empty_failure));
+ len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n",
+ le32_to_cpu(htt_stats_buf->add_msdu_failure));
+
+ len += scnprintf(buf + len, buf_len - len, "TQM_ERROR_RESET_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "tqm_cache_ctl_err = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_cache_ctl_err));
+ len += scnprintf(buf + len, buf_len - len, "tqm_soft_reset = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_soft_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_total_num_in_use_link_descs = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_link_descs));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_worst_case_num_lost_link_descs = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_link_descs));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_worst_case_num_lost_host_tx_bufs_count = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_host_tx_buf_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_num_in_use_link_descs_internal_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_internal_tqm));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_num_in_use_link_descs_wbm_idle_link_ring = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_idle_link_rng));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_time_to_tqm_hang_delta_ms = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_time_to_tqm_hang_delta_ms));
+ len += scnprintf(buf + len, buf_len - len, "tqm_reset_recovery_time_ms = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_recovery_time_ms));
+ len += scnprintf(buf + len, buf_len - len, "tqm_reset_num_peers_hdl = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_peers_hdl));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_cumm_dirty_hw_mpduq_proc_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_mpduq_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_cumm_dirty_hw_msduq_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_msduq_proc));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_su_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_su_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_other_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_other_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_trig_type = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_type));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_trig_cfg = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_cfg));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_skip_cmd_status_null = %u\n\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cmd_skp_status_null));
+
+ stats_req->buf_len = len;
+}
+
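+/*
+ * This TLV is all payload, so tag_len >> 2 converts the reported byte
+ * count into the number of __le32 elements. print_array_to_buf(), a
+ * helper expected to be defined earlier in this file, is relied on to
+ * byte-swap and append each element after the label and to terminate
+ * the dump with the supplied footer string.
+ */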
+static void
+ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "gen_mpdu_end_reason",
+ htt_stats_buf->gen_mpdu_end_reason, num_elements,
+ "\n\n");
+
+ stats_req->buf_len = len;
+}
+
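+/*
+ * Unlike the gen_mpdu printer above, the list_mpdu variants clamp the
+ * element count with min_t() so that an oversized firmware-reported
+ * tag_len cannot walk past the end of the fixed TLV arrays.
+ */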
+static void
+ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "list_mpdu_end_reason",
+ htt_stats_buf->list_mpdu_end_reason, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
+ len += print_array_to_buf(buf, len, "list_mpdu_cnt_hist",
+ htt_stats_buf->list_mpdu_cnt_hist, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_pdev_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_pdev_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_count));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_count));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_ttl));
+ len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
+ le32_to_cpu(htt_stats_buf->send_bar));
+ len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
+ le32_to_cpu(htt_stats_buf->bar_sync));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu));
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->sync_cmd));
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->write_cmd));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_tlv_proc));
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_mpdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_list_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu_tried_cmd));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_queue_stats_cmd));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_head_info_cmd));
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_flow_stats_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_ttl_cmd));
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->flush_cache_cmd));
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->update_mpduq_cmd));
+ len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueue));
+ len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueue_notify));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu_at_head));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu_state_valid));
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_udp_notify1));
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_udp_notify2));
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_nonudp_notify1));
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n",
+ le32_to_cpu(htt_stats_buf->sched_nonudp_notify2));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl2fw_entry_count));
+ len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n",
+ le32_to_cpu(htt_stats_buf->not_to_fw));
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_pdev_vdev_peer));
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl_res_invalid_addrx));
+ len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n",
+ le32_to_cpu(htt_stats_buf->wbm2fw_entry_count));
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_pdev));
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_addrx_timeout = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl_res_addrx_timeout));
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_vdev));
+ len += scnprintf(buf + len, buf_len - len, "invalid_tcl_exp_frame_desc = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_tcl_exp_frame_desc));
+ len += scnprintf(buf + len, buf_len - len, "vdev_id_mismatch_count = %u\n\n",
+ le32_to_cpu(htt_stats_buf->vdev_id_mismatch_cnt));
+
+ stats_req->buf_len = len;
+}
+
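+/*
+ * The EAPOL counters below track key-exchange traffic classified by the
+ * TX data engine: m1..m4 correspond to the 4-way handshake messages and
+ * g1/g2 to the group-key handshake, alongside generic EAP/EAPOL frames.
+ */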
+static void
+ath12k_htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m1_packets));
+ len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m2_packets));
+ len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m3_packets));
+ len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m4_packets));
+ len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->g1_packets));
+ len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->g2_packets));
+ len += scnprintf(buf + len, buf_len - len, "rc4_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->rc4_packets));
+ len += scnprintf(buf + len, buf_len - len, "eap_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->eap_packets));
+ len += scnprintf(buf + len, buf_len - len, "eapol_start_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_start_packets));
+ len += scnprintf(buf + len, buf_len - len, "eapol_logoff_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_logoff_packets));
+ len += scnprintf(buf + len, buf_len - len, "eapol_encap_asf_packets = %u\n\n",
+ le32_to_cpu(htt_stats_buf->eapol_encap_asf_packets));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_classify_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->arp_packets));
+ len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->igmp_packets));
+ len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->dhcp_packets));
+ len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n",
+ le32_to_cpu(htt_stats_buf->host_inspected));
+ len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_included));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_mcs));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_nss));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_preamble_type));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_chainmask));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_guard_interval));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_retries));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_bw_info));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_power));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_key_flags));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_no_encryption));
+ len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_entry_count));
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_priority_be));
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_priority_high));
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_priority_low));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_be));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_over_sub));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_bursty));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_interactive));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_periodic));
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_hwqueue_alloc));
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_hwqueue_created));
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_hwqueue_send_to_host));
+ len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n",
+ le32_to_cpu(htt_stats_buf->mcast_entry));
+ len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n",
+ le32_to_cpu(htt_stats_buf->bcast_entry));
+ len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_update_peer_cache));
+ len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_learning_frame));
+ len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_invalid_peer));
+ len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n",
+ le32_to_cpu(htt_stats_buf->mec_notify));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_bss_peer_not_found));
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_bcast_mcast_no_peer));
+ len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n",
+ le32_to_cpu(htt_stats_buf->sta_delete_in_progress));
+ len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->ibss_no_bss_peer));
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_vdev_type));
+ len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_ast_peer_entry));
+ len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n",
+ le32_to_cpu(htt_stats_buf->peer_entry_invalid));
+ len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n",
+ le32_to_cpu(htt_stats_buf->ethertype_not_ip));
+ len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_lookup_failed));
+ len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n",
+ le32_to_cpu(htt_stats_buf->qpeer_not_allow_data));
+ len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_tid_override));
+ len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n",
+ le32_to_cpu(htt_stats_buf->ipv6_jumbogram_zero_length));
+ len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n",
+ le32_to_cpu(htt_stats_buf->qos_to_non_qos_in_prog));
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_eapol = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_bcast_mcast_eapol));
+ len += scnprintf(buf + len, buf_len - len, "unicast_on_ap_bss_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->unicast_on_ap_bss_peer));
+ len += scnprintf(buf + len, buf_len - len, "ap_vdev_invalid = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_vdev_invalid));
+ len += scnprintf(buf + len, buf_len - len, "incomplete_llc = %u\n",
+ le32_to_cpu(htt_stats_buf->incomplete_llc));
+ len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m3 = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_duplicate_m3));
+ len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m4 = %u\n\n",
+ le32_to_cpu(htt_stats_buf->eapol_duplicate_m4));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "eok = %u\n",
+ le32_to_cpu(htt_stats_buf->eok));
+ len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n",
+ le32_to_cpu(htt_stats_buf->classify_done));
+ len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->lookup_failed));
+ len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host_dhcp));
+ len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host_mcast));
+ len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host_unknown_dest));
+ len += scnprintf(buf + len, buf_len - len, "send_host = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host));
+ len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n",
+ le32_to_cpu(htt_stats_buf->status_invalid));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueued_pkts));
+ len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->to_tqm));
+ len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n",
+ le32_to_cpu(htt_stats_buf->to_tqm_bypass));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n",
+ le32_to_cpu(htt_stats_buf->discarded_pkts));
+ len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n",
+ le32_to_cpu(htt_stats_buf->local_frames));
+ len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n",
+ le32_to_cpu(htt_stats_buf->is_ext_msdu));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_compl_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl_dummy_frame));
+ len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_dummy_frame));
+ len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_notify_frame));
+ len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n",
+ le32_to_cpu(htt_stats_buf->fw2wbm_enq));
+ len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n",
+ le32_to_cpu(htt_stats_buf->tqm_bypass_frame));
+
+ stats_req->buf_len = len;
+}
+
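+/*
+ * "Selfgen" frames are generated by the target itself rather than
+ * queued by the host: BARs, RTS/CTS2self protection, QoS-null frames
+ * and the delayed-BAR buckets counted below.
+ */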
+static void
+ath12k_htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n",
+ le32_to_cpu(htt_stats_buf->su_bar));
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ le32_to_cpu(htt_stats_buf->rts));
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ le32_to_cpu(htt_stats_buf->cts2self));
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ le32_to_cpu(htt_stats_buf->qos_null));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_1));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_2));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_3));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_4));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_5));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_6));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_7));
+
+ stats_req->buf_len = len;
+}
+
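+/*
+ * The brpoll arrays below are printed starting from index 1, skipping
+ * user 0; print_array_to_buf_index() presumably expands the "X"
+ * placeholder in the label to the 1-based user position.
+ */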
+static void
+ath12k_htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_su_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_su_ndp));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp));
+ len += print_array_to_buf_index(buf, len, "ac_mu_mimo_brpollX_tried = ", 1,
+ htt_stats_buf->ac_mu_mimo_brpoll,
+ ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1,
+ "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_su_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_su_ndp));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp));
+ len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpollX_tried = ", 1,
+ htt_stats_buf->ax_mu_mimo_brpoll,
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_basic_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_ulmumimo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_bsr_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_bar_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_rts_trigger));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_be_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_be_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_BE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndpa_queued));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndp_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndp_queued));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndp));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_queued));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_queued));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp));
+ len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_queued = ", 1,
+ htt_stats_buf->be_mu_mimo_brpoll_queued,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_tried = ", 1,
+ htt_stats_buf->be_mu_mimo_brpoll,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1,
+ "\n");
+ len += print_array_to_buf(buf, len, "be_ul_mumimo_trigger = ",
+ htt_stats_buf->be_ul_mumimo_trigger,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n");
+ len += scnprintf(buf + len, buf_len - len, "be_basic_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->be_basic_trigger));
+ len += scnprintf(buf + len, buf_len - len, "be_ulmumimo_total_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->be_ulmumimo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "be_bsr_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->be_bsr_trigger));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_bar_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_bar_trigger));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_rts_trigger = %u\n\n",
+ le32_to_cpu(htt_stats_buf->be_mu_rts_trigger));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_su_ndp_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_su_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp1_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp2_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp3_err));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_su_ndp_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_su_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp_err));
+ len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpX_err", 1,
+ htt_stats_buf->ax_mu_mimo_brp_err,
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1,
+ "\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_basic_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_ulmumimo_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_bsr_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_bar_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_rts_trigger_err));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_be_err_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_be_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_BE_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndp_err));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndp_flushed = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndp_flushed));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_flushed = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndpa_flushed));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_flushed = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_flushed));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_err));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_flushed = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_flushed));
+ len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpX_err", 1,
+ htt_stats_buf->be_mu_mimo_brp_err,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_flushed", 1,
+ htt_stats_buf->be_mu_mimo_brpoll_flushed,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1,
+ "\n");
+ len += print_array_to_buf(buf, len, "be_mu_mimo_num_cbf_rcvd_on_brp_err",
+ htt_stats_buf->be_mu_mimo_brp_err_num_cbf_rxd,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n");
+ len += print_array_to_buf(buf, len, "be_ul_mumimo_trigger_err",
+ htt_stats_buf->be_ul_mumimo_trigger_err,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n");
+ len += scnprintf(buf + len, buf_len - len, "be_basic_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_basic_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "be_ulmumimo_total_trig_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_ulmumimo_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "be_bsr_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_bsr_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_bar_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_bar_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_rts_trigger_err = %u\n\n",
+ le32_to_cpu(htt_stats_buf->be_mu_rts_trigger_err));
+
+ stats_req->buf_len = len;
+}
+
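+/*
+ * The sched-status printers below dump two kinds of histograms:
+ * *_sch_status arrays sized by ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS
+ * and *_sch_flag_err arrays sized by
+ * ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS; each count passed to
+ * print_array_to_buf() must match the TLV struct definition.
+ */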
+static void
+ath12k_htt_print_tx_selfgen_ac_sched_status_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats)
+{
+ const struct ath12k_htt_tx_selfgen_ac_sched_status_stats_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats->buf;
+ u32 len = stats->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_SELFGEN_AC_SCHED_STATUS_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "ac_su_ndpa_sch_status",
+ htt_stats_buf->ac_su_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ac_su_ndp_sch_status",
+ htt_stats_buf->ac_su_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_ndpa_sch_status",
+ htt_stats_buf->ac_mu_mimo_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_ndp_sch_status",
+ htt_stats_buf->ac_mu_mimo_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_brp_sch_status",
+ htt_stats_buf->ac_mu_mimo_brp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ac_su_ndp_sch_flag_err",
+ htt_stats_buf->ac_su_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_ndp_sch_flag_err",
+ htt_stats_buf->ac_mu_mimo_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_brp_sch_flag_err",
+ htt_stats_buf->ac_mu_mimo_brp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n");
+
+ stats->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_ax_sched_status_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats)
+{
+ const struct ath12k_htt_tx_selfgen_ax_sched_status_stats_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats->buf;
+ u32 len = stats->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_SELFGEN_AX_SCHED_STATUS_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "ax_su_ndpa_sch_status",
+ htt_stats_buf->ax_su_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_su_ndp_sch_status",
+ htt_stats_buf->ax_su_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_mimo_ndpa_sch_status",
+ htt_stats_buf->ax_mu_mimo_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_mimo_ndp_sch_status",
+ htt_stats_buf->ax_mu_mimo_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_brp_sch_status",
+ htt_stats_buf->ax_mu_brp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_bar_sch_status",
+ htt_stats_buf->ax_mu_bar_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_basic_trig_sch_status",
+ htt_stats_buf->ax_basic_trig_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_su_ndp_sch_flag_err",
+ htt_stats_buf->ax_su_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_mimo_ndp_sch_flag_err",
+ htt_stats_buf->ax_mu_mimo_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_brp_sch_flag_err",
+ htt_stats_buf->ax_mu_brp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_bar_sch_flag_err",
+ htt_stats_buf->ax_mu_bar_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ax_basic_trig_sch_flag_err",
+ htt_stats_buf->ax_basic_trig_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ax_ulmumimo_trig_sch_status",
+ htt_stats_buf->ax_ulmumimo_trig_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_ulmumimo_trig_sch_flag_err",
+ htt_stats_buf->ax_ulmumimo_trig_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n");
+
+ stats->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_be_sched_status_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats)
+{
+ const struct ath12k_htt_tx_selfgen_be_sched_status_stats_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats->buf;
+ u32 len = stats->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_SELFGEN_BE_SCHED_STATUS_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "be_su_ndpa_sch_status",
+ htt_stats_buf->be_su_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_su_ndp_sch_status",
+ htt_stats_buf->be_su_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_mimo_ndpa_sch_status",
+ htt_stats_buf->be_mu_mimo_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_mimo_ndp_sch_status",
+ htt_stats_buf->be_mu_mimo_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_brp_sch_status",
+ htt_stats_buf->be_mu_brp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_bar_sch_status",
+ htt_stats_buf->be_mu_bar_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_basic_trig_sch_status",
+ htt_stats_buf->be_basic_trig_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_su_ndp_sch_flag_err",
+ htt_stats_buf->be_su_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_mimo_ndp_sch_flag_err",
+ htt_stats_buf->be_mu_mimo_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_brp_sch_flag_err",
+ htt_stats_buf->be_mu_brp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_bar_sch_flag_err",
+ htt_stats_buf->be_mu_bar_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "be_basic_trig_sch_flag_err",
+ htt_stats_buf->be_basic_trig_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+	len += print_array_to_buf(buf, len, "be_ulmumimo_trig_sch_status",
+				  htt_stats_buf->be_ulmumimo_trig_sch_status,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_ulmumimo_trig_sch_flag_err",
+ htt_stats_buf->be_ulmumimo_trig_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n");
+
+ stats->buf_len = len;
+}
+
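+/*
+ * The string TLV packs its message four characters per __le32 word;
+ * the loop below unpacks one word at a time with "%.*s" (width 4) and
+ * bails out once the local scratch buffer is full.
+ */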
+static void
+ath12k_htt_print_stats_string_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_stats_string_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u16 i;
+ u16 index = 0;
+ u32 datum;
+ char data[ATH12K_HTT_MAX_STRING_LEN] = {0};
+
+ tag_len = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n");
+ for (i = 0; i < tag_len; i++) {
+ datum = __le32_to_cpu(htt_stats_buf->data[i]);
+ index += scnprintf(&data[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "%.*s", 4, (char *)&datum);
+ if (index >= ATH12K_HTT_MAX_STRING_LEN)
+ break;
+ }
+ len += scnprintf(buf + len, buf_len - len, "data = %s\n\n", data);
+
+ stats_req->buf_len = len;
+}
+
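+/*
+ * SRING stats arrive as packed u32 words (mac_id/ring_id/arena/ep, the
+ * head/tail pointers, and so on); each subfield is extracted with
+ * u32_get_bits() against the ATH12K_HTT_SRING_STATS_* masks after the
+ * word is converted from little endian.
+ */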
+static void
+ath12k_htt_print_sring_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sring_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+ u32 avail_words;
+ u32 head_tail_ptr;
+ u32 sring_stat;
+ u32 tail_ptr;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__ring_id__arena__ep);
+ avail_words = __le32_to_cpu(htt_stats_buf->num_avail_words__num_valid_words);
+ head_tail_ptr = __le32_to_cpu(htt_stats_buf->head_ptr__tail_ptr);
+ sring_stat = __le32_to_cpu(htt_stats_buf->consumer_empty__producer_full);
+ tail_ptr = __le32_to_cpu(htt_stats_buf->prefetch_count__internal_tail_ptr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "ring_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_RING_ID));
+ len += scnprintf(buf + len, buf_len - len, "arena = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_ARENA));
+ len += scnprintf(buf + len, buf_len - len, "ep = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_EP));
+ len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->base_addr_lsb));
+ len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->base_addr_msb));
+ len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n",
+ le32_to_cpu(htt_stats_buf->ring_size));
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ le32_to_cpu(htt_stats_buf->elem_size));
+ len += scnprintf(buf + len, buf_len - len, "num_avail_words = %u\n",
+ u32_get_bits(avail_words,
+ ATH12K_HTT_SRING_STATS_NUM_AVAIL_WORDS));
+ len += scnprintf(buf + len, buf_len - len, "num_valid_words = %u\n",
+ u32_get_bits(avail_words,
+ ATH12K_HTT_SRING_STATS_NUM_VALID_WORDS));
+ len += scnprintf(buf + len, buf_len - len, "head_ptr = %u\n",
+ u32_get_bits(head_tail_ptr, ATH12K_HTT_SRING_STATS_HEAD_PTR));
+ len += scnprintf(buf + len, buf_len - len, "tail_ptr = %u\n",
+ u32_get_bits(head_tail_ptr, ATH12K_HTT_SRING_STATS_TAIL_PTR));
+ len += scnprintf(buf + len, buf_len - len, "consumer_empty = %u\n",
+ u32_get_bits(sring_stat,
+ ATH12K_HTT_SRING_STATS_CONSUMER_EMPTY));
+	len += scnprintf(buf + len, buf_len - len, "producer_full = %u\n",
+			 u32_get_bits(sring_stat,
+				      ATH12K_HTT_SRING_STATS_PRODUCER_FULL));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_count = %u\n",
+ u32_get_bits(tail_ptr, ATH12K_HTT_SRING_STATS_PREFETCH_COUNT));
+ len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %u\n\n",
+ u32_get_bits(tail_ptr,
+ ATH12K_HTT_SRING_STATS_INTERNAL_TAIL_PTR));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sfm_cmn_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sfm_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_total));
+ len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n",
+ le32_to_cpu(htt_stats_buf->mem_empty));
+ len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n",
+ le32_to_cpu(htt_stats_buf->deallocate_bufs));
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_records));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sfm_client_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sfm_client_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "client_id = %u\n",
+ le32_to_cpu(htt_stats_buf->client_id));
+ len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_min));
+ len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_max));
+ len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_busy));
+ len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_alloc));
+ len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_avail));
+ len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_users));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sfm_client_user_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sfm_client_user_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV:\n");
+ len += print_array_to_buf(buf, len, "dwords_used_by_user_n",
+ htt_stats_buf->dwords_used_by_user_n,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_mimo_sch_posted));
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_mimo_sch_failed));
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_mimo_ppdu_posted));
+ len += scnprintf(buf + len, buf_len - len,
+ "\nac_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0,
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_per_grp_sz[0]));
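+	/* Index 0 counts SU schedules; group index i maps to i + 1 total streams. */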
+ for (i = 1; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_posted_per_group_index %u ", i);
+ len += scnprintf(buf + len, buf_len - len,
+ "(TOTAL STREAMS = %u) = %u\n", i + 1,
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_per_grp_sz[i]));
+ }
+
+ for (i = 0; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_posted_per_group_index %u ",
+ i + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS);
+ len += scnprintf(buf + len, buf_len - len,
+ "(TOTAL STREAMS = %u) = %u\n",
+ i + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS + 1,
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_grp_sz_ext[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\nax_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0,
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_per_grp_sz[0]));
+ for (i = 1; i < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_posted_per_group_index %u ", i);
+ len += scnprintf(buf + len, buf_len - len,
+ "(TOTAL STREAMS = %u) = %u\n", i + 1,
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_per_grp_sz[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\nbe_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0,
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_per_grp_sz[0]));
+ for (i = 1; i < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "be_mu_mimo_sch_posted_per_group_index %u ", i);
+ len += scnprintf(buf + len, buf_len - len,
+ "(TOTAL STREAMS = %u) = %u\n", i + 1,
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_per_grp_sz[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ac MU_MIMO SCH STATS:\n");
+ for (i = 0; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_sch_nusers_");
+ len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_sch_nusers[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n");
+ for (i = 0; i < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_sch_nusers_");
+ len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_sch_nusers[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11be MU_MIMO SCH STATS:\n");
+ for (i = 0; i < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_sch_nusers_");
+ len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_sch_nusers[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
+ for (i = 0; i < ATH12K_HTT_TX_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_sch_nusers_%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ofdma_sch_nusers[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_basic_sch_nusers_%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_ofdma_nusers[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_bsr_sch_nusers_%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_ofdma_bsr_nusers[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_bar_sch_nusers_%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_ofdma_bar_nusers[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_brp_sch_nusers_%u = %u\n\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_ofdma_brp_nusers[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "11ax UL MUMIMO SCH STATS:\n");
+ for (i = 0; i < ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_basic_sch_nusers_%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_mumimo_nusers[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_brp_sch_nusers_%u = %u\n\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_mumimo_brp_nusers[i]));
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_mumimo_grp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ int j;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_MUMIMO_GRP_STATS:\n");
+ len += print_array_to_buf(buf, len,
+ "dl_mumimo_grp_tputs_observed (per bin = 300 mbps)",
+ htt_stats_buf->dl_mumimo_grp_tputs,
+ ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS, "\n");
+	len += print_array_to_buf(buf, len, "dl_mumimo_grp_eligible",
+ htt_stats_buf->dl_mumimo_grp_eligible,
+ ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
+ len += print_array_to_buf(buf, len, "dl_mumimo_grp_ineligible",
+ htt_stats_buf->dl_mumimo_grp_ineligible,
+ ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
+ len += scnprintf(buf + len, buf_len - len, "dl_mumimo_grp_invalid:\n");
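+	/*
+	 * dl_mumimo_grp_invalid holds one back-to-back row of
+	 * ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE counters per group.
+	 */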
+	for (j = 0; j < ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ; j++) {
+		len += scnprintf(buf + len, buf_len - len, "grp_id = %u", j);
+		len += print_array_to_buf(buf, len, "",
+					  htt_stats_buf->dl_mumimo_grp_invalid +
+					  (j * ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE),
+					  ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE,
+					  "\n");
+	}
+
+ len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_grp_size",
+ htt_stats_buf->ul_mumimo_grp_best_grp_size,
+ ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
+	len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_num_usrs",
+ htt_stats_buf->ul_mumimo_grp_best_usrs,
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
+ len += print_array_to_buf(buf, len,
+ "ul_mumimo_grp_tputs_observed (per bin = 300 mbps)",
+ htt_stats_buf->ul_mumimo_grp_tputs,
+ ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 user_index;
+ u32 tx_sched_mode;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ user_index = __le32_to_cpu(htt_stats_buf->user_index);
+ tx_sched_mode = __le32_to_cpu(htt_stats_buf->tx_sched_mode);
+
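+	/*
+	 * Each instance of this TLV carries counters for a single user of a
+	 * single scheduling mode; the section header prints for user 0 only.
+	 */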
+ if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
+ if (!user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+
+ if (user_index < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_queued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_tried_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_failed_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_requeued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_err_no_ba_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->err_no_ba_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdu_underrun_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->ampdu_underrun_usr));
+ }
+ }
+
+ if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
+ if (!user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+
+ if (user_index < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_queued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_tried_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_failed_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_requeued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_err_no_ba_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->err_no_ba_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdu_underrun_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->ampdu_underrun_usr));
+ }
+ }
+
+ if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
+ if (!user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+
+ if (user_index < ATH12K_HTT_TX_NUM_OFDMA_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_queued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_tried_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_failed_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_requeued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_err_no_ba_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->err_no_ba_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdu_underrun_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->ampdu_underrun_usr));
+ }
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_CCA_STATS_HIST_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_num));
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
+ le32_to_cpu(htt_stats_buf->num_records));
+ len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->valid_cca_counters_bitmap));
+ len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n",
+ le32_to_cpu(htt_stats_buf->collection_interval));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n");
+ len += scnprintf(buf + len, buf_len - len, "tx_frame_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_frame_usec));
+ len += scnprintf(buf + len, buf_len - len, "rx_frame_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_frame_usec));
+ len += scnprintf(buf + len, buf_len - len, "rx_clear_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_clear_usec));
+ len += scnprintf(buf + len, buf_len - len, "my_rx_frame_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->my_rx_frame_usec));
+ len += scnprintf(buf + len, buf_len - len, "usec_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->usec_cnt));
+ len += scnprintf(buf + len, buf_len - len, "med_rx_idle_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->med_rx_idle_usec));
+ len += scnprintf(buf + len, buf_len - len, "med_tx_idle_global_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->med_tx_idle_global_usec));
+ len += scnprintf(buf + len, buf_len - len, "cca_obss_usec = %u\n\n",
+ le32_to_cpu(htt_stats_buf->cca_obss_usec));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 i;
+ static const char *access_cat_names[ATH12K_HTT_NUM_AC_WMM] = {"best effort",
+ "background",
+ "video", "voice"};
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_OBSS_PD_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "num_spatial_reuse_tx = %u\n",
+ le32_to_cpu(htt_stats_buf->num_sr_tx_transmissions));
+ len += scnprintf(buf + len, buf_len - len,
+ "num_spatial_reuse_opportunities = %u\n",
+ le32_to_cpu(htt_stats_buf->num_spatial_reuse_opportunities));
+ len += scnprintf(buf + len, buf_len - len, "num_non_srg_opportunities = %u\n",
+ le32_to_cpu(htt_stats_buf->num_non_srg_opportunities));
+ len += scnprintf(buf + len, buf_len - len, "num_non_srg_ppdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_non_srg_ppdu_tried));
+ len += scnprintf(buf + len, buf_len - len, "num_non_srg_ppdu_success = %u\n",
+ le32_to_cpu(htt_stats_buf->num_non_srg_ppdu_success));
+ len += scnprintf(buf + len, buf_len - len, "num_srg_opportunities = %u\n",
+ le32_to_cpu(htt_stats_buf->num_srg_opportunities));
+ len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_srg_ppdu_tried));
+ len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_success = %u\n",
+ le32_to_cpu(htt_stats_buf->num_srg_ppdu_success));
+ len += scnprintf(buf + len, buf_len - len, "num_psr_opportunities = %u\n",
+ le32_to_cpu(htt_stats_buf->num_psr_opportunities));
+ len += scnprintf(buf + len, buf_len - len, "num_psr_ppdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_psr_ppdu_tried));
+ len += scnprintf(buf + len, buf_len - len, "num_psr_ppdu_success = %u\n",
+ le32_to_cpu(htt_stats_buf->num_psr_ppdu_success));
+ len += scnprintf(buf + len, buf_len - len, "min_duration_check_flush_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->num_obss_min_dur_check_flush_cnt));
+ len += scnprintf(buf + len, buf_len - len, "sr_ppdu_abort_flush_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_sr_ppdu_abort_flush_cnt));
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_OBSS_PD_PER_AC_STATS:\n");
+ for (i = 0; i < ATH12K_HTT_NUM_AC_WMM; i++) {
+ len += scnprintf(buf + len, buf_len - len, "Access Category %u (%s)\n",
+ i, access_cat_names[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "num_non_srg_ppdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_non_srg_tried_per_ac[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "num_non_srg_ppdu_success = %u\n",
+ le32_to_cpu(htt_stats_buf->num_non_srg_success_ac[i]));
+ len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_srg_tried_per_ac[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "num_srg_ppdu_success = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_srg_success_per_ac[i]));
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u8 i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
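+	/* tx_legacy_ofdm_rate[0..7] map to the 6, 9, 12, 18, 24, 36, 48 and 54 Mbps legacy rates. */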
+ len += scnprintf(buf + len, buf_len - len, "Legacy OFDM Rates: 6 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0]));
+ len += scnprintf(buf + len, buf_len - len, "9 Mbps: %u, 12 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2]));
+ len += scnprintf(buf + len, buf_len - len, "18 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3]));
+ len += scnprintf(buf + len, buf_len - len, "24 Mbps: %u, 36 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5]));
+ len += scnprintf(buf + len, buf_len - len, "48 Mbps: %u, 54 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7]));
+
+ len += print_array_to_buf(buf, len, "tx_ol_mcs", htt_stats_buf->tx_su_ol_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_ibf_mcs", htt_stats_buf->tx_su_ibf_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_txbf_mcs", htt_stats_buf->tx_su_txbf_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
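+	/* NSS counters are reported 1-based, hence the print start index of 1. */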
+ len += print_array_to_buf_index(buf, len, "tx_ol_nss", 1,
+ htt_stats_buf->tx_su_ol_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "tx_ibf_nss", 1,
+ htt_stats_buf->tx_su_ibf_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "tx_txbf_nss", 1,
+ htt_stats_buf->tx_su_txbf_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf(buf, len, "tx_ol_bw", htt_stats_buf->tx_su_ol_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_ol_bw" :
+ "half_tx_ol_bw",
+ htt_stats_buf->ol[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+
+ len += print_array_to_buf(buf, len, "tx_ibf_bw", htt_stats_buf->tx_su_ibf_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_ibf_bw" :
+ "half_tx_ibf_bw",
+ htt_stats_buf->ibf[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+
+ len += print_array_to_buf(buf, len, "tx_txbf_bw", htt_stats_buf->tx_su_txbf_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_txbf_bw" :
+ "half_tx_txbf_bw",
+ htt_stats_buf->txbf[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TXBF_FLAG_RETURN_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "TXBF_reason_code_stats: 0:%u, 1:%u,",
+ le32_to_cpu(htt_stats_buf->txbf_flag_set_mu_mode),
+ le32_to_cpu(htt_stats_buf->txbf_flag_set_final_status));
+ len += scnprintf(buf + len, buf_len - len, " 2:%u, 3:%u, 4:%u, 5:%u, ",
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_verified_txbf_mode),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_p2p_access),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_max_nss_in_he160),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_uldlofdma));
+ len += scnprintf(buf + len, buf_len - len, "6:%u, 7:%u\n\n",
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_mcs_threshold_val),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_final_status));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndpa_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDPA_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_queued));
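+	/* Step back over the trailing comma and re-terminate the string. */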
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_tried =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_flush));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndp_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDP_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_queued));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_tried =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_flush));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_brp_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_BRP_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_queued));
+ len--;
+ *(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_tried =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_flushed));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err_num_cbf_rcvd =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_num_cbf_rcvd));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_steer_arr);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_AX_STEER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_ppdu_steer =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_ppdu_steer));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_prefetch =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_prefetch));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_sound =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_sound));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_force_sound =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_force_sound));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_AX_STEER_MPDU_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_tried = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_failed = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_tried = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_failed = %u\n\n",
+ le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_failed));
+
+ stats_req->buf_len = len;
+}
+
+static void ath12k_htt_print_dlpager_entry(const struct ath12k_htt_pgs_info *pg_info,
+ int idx, char *str_buf)
+{
+ u64 page_timestamp;
+ u16 index = 0;
+
+ page_timestamp = ath12k_le32hilo_to_u64(pg_info->ts_msb, pg_info->ts_lsb);
+
+ index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "Index - %u ; Page Number - %u ; ",
+ idx, le32_to_cpu(pg_info->page_num));
+ index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "Num of pages - %u ; Timestamp - %lluus\n",
+ le32_to_cpu(pg_info->num_pgs), page_timestamp);
+}
+
+static void
+ath12k_htt_print_dlpager_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_dl_pager_stats_tlv *stat_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 dword_lock, dword_unlock;
+ int i;
+ u8 *buf = stats_req->buf;
+ u8 pg_locked;
+ u8 pg_unlock;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
+
+ if (tag_len < sizeof(*stat_buf))
+ return;
+
+ dword_lock = le32_get_bits(stat_buf->info2,
+ ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2);
+ dword_unlock = le32_get_bits(stat_buf->info2,
+ ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2);
+
+ pg_locked = ATH12K_HTT_STATS_PAGE_LOCKED;
+ pg_unlock = ATH12K_HTT_STATS_PAGE_UNLOCKED;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_DLPAGER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ASYNC locked pages = %u\n",
+ le32_get_bits(stat_buf->info0,
+ ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0));
+ len += scnprintf(buf + len, buf_len - len, "SYNC locked pages = %u\n",
+ le32_get_bits(stat_buf->info0,
+ ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0));
+ len += scnprintf(buf + len, buf_len - len, "Total locked pages = %u\n",
+ le32_get_bits(stat_buf->info1,
+ ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1));
+ len += scnprintf(buf + len, buf_len - len, "Total free pages = %u\n",
+ le32_get_bits(stat_buf->info1,
+ ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1));
+
+ len += scnprintf(buf + len, buf_len - len, "\nLOCKED PAGES HISTORY\n");
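+	/* The most recent entry sits at count - 1, wrapping to the last slot when the count is zero. */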
+ len += scnprintf(buf + len, buf_len - len, "last_locked_page_idx = %u\n",
+ dword_lock ? dword_lock - 1 : (ATH12K_PAGER_MAX - 1));
+
+ for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_locked][i],
+ i, str_buf);
+ len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\nUNLOCKED PAGES HISTORY\n");
+ len += scnprintf(buf + len, buf_len - len, "last_unlocked_page_idx = %u\n",
+ dword_unlock ? dword_unlock - 1 : ATH12K_PAGER_MAX - 1);
+
+ for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_unlock][i],
+ i, str_buf);
+ len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf, i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
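+	/* Per-chain noise floor: board-file calibration values first, then the runtime measurements. */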
+ for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "bdf_nf_chain[%d] = %d\n",
+ i, a_sle32_to_cpu(htt_stats_buf->nf_chain[i]));
+ for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "runtime_nf_chain[%d] = %d\n",
+ i, a_sle32_to_cpu(htt_stats_buf->runtime_nf_chain[i]));
+ len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u / %u (mins)\n",
+ le32_to_cpu(htt_stats_buf->false_radar_cnt),
+ le32_to_cpu(htt_stats_buf->fw_run_time));
+ len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->radar_cs_cnt));
+ len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n\n",
+ a_sle32_to_cpu(htt_stats_buf->ani_level));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_ofdma_timing_err_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_cck_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mactx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->macrx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phytx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrx_defer_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_adj_lstf_event_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_adj_non_legacy_cnt));
+ len += print_array_to_buf(buf, len, "rx_pkt_cnt", htt_stats_buf->rx_pkt_cnt,
+ ATH12K_HTT_MAX_RX_PKT_CNT, "\n");
+ len += print_array_to_buf(buf, len, "rx_pkt_crc_pass_cnt",
+ htt_stats_buf->rx_pkt_crc_pass_cnt,
+ ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT, "\n");
+ len += print_array_to_buf(buf, len, "per_blk_err_cnt",
+ htt_stats_buf->per_blk_err_cnt,
+ ATH12K_HTT_MAX_PER_BLK_ERR_CNT, "\n");
+ len += print_array_to_buf(buf, len, "rx_ota_err_cnt",
+ htt_stats_buf->rx_ota_err_cnt,
+ ATH12K_HTT_MAX_RX_OTA_ERR_CNT, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_reset_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_mhz));
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_band_center_freq1));
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_band_center_freq2));
+ len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_phy_mode));
+ len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->chan_flags));
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_num));
+ len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->reset_cause));
+ len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->prev_reset_cause));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_warm_reset_src));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_tbl_mode));
+ len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->xbar_val));
+ len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
+ le32_to_cpu(htt_stats_buf->force_calibration));
+ len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrf_mode));
+ len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_homechan));
+ len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_tx_ch_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_rx_ch_mask));
+ len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phybb_ini_mask));
+ len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phyrf_ini_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_dfs_en_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_sscan_en_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_synth_sel_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_adfs_freq));
+ len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->cck_fir_settings));
+ len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_dyn_pri_chan));
+ len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->cca_thresh));
+ len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
+ le32_to_cpu(htt_stats_buf->dyn_cca_status));
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->rxdesense_thresh_hw));
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n\n",
+ le32_to_cpu(htt_stats_buf->rxdesense_thresh_sw));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_reset_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->cf_active_low_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->cf_active_low_pass_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_off_through_vreg_cnt));
+ len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->force_calibration_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rf_mode_switch_phy_off_cnt));
+ len += scnprintf(buf + len, buf_len - len, "temperature_recal_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->temperature_recal_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_tpc_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_tpc_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_TPC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "tx_power_scale = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_power_scale));
+ len += scnprintf(buf + len, buf_len - len, "tx_power_scale_db = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_power_scale_db));
+ len += scnprintf(buf + len, buf_len - len, "min_negative_tx_power = %d\n",
+ le32_to_cpu(htt_stats_buf->min_negative_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "reg_ctl_domain = %u\n",
+ le32_to_cpu(htt_stats_buf->reg_ctl_domain));
+ len += scnprintf(buf + len, buf_len - len, "twice_max_rd_power = %u\n",
+ le32_to_cpu(htt_stats_buf->twice_max_rd_power));
+ len += scnprintf(buf + len, buf_len - len, "max_tx_power = %u\n",
+ le32_to_cpu(htt_stats_buf->max_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "home_max_tx_power = %u\n",
+ le32_to_cpu(htt_stats_buf->home_max_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "psd_power = %d\n",
+ le32_to_cpu(htt_stats_buf->psd_power));
+ len += scnprintf(buf + len, buf_len - len, "eirp_power = %u\n",
+ le32_to_cpu(htt_stats_buf->eirp_power));
+ len += scnprintf(buf + len, buf_len - len, "power_type_6ghz = %u\n",
+ le32_to_cpu(htt_stats_buf->power_type_6ghz));
+ len += print_array_to_buf(buf, len, "max_reg_allowed_power",
+ htt_stats_buf->max_reg_allowed_power,
+ ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+ len += print_array_to_buf(buf, len, "max_reg_allowed_power_6ghz",
+ htt_stats_buf->max_reg_allowed_power_6ghz,
+ ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+ len += print_array_to_buf(buf, len, "sub_band_cfreq",
+ htt_stats_buf->sub_band_cfreq,
+ ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n");
+ len += print_array_to_buf(buf, len, "sub_band_txpower",
+ htt_stats_buf->sub_band_txpower,
+ ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_soc_txrx_stats_common_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_t2h_soc_txrx_stats_common_tlv *htt_stats_buf = tag_buf;
+ u64 drop_count;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ drop_count = ath12k_le32hilo_to_u64(htt_stats_buf->inv_peers_msdu_drop_count_hi,
+ htt_stats_buf->inv_peers_msdu_drop_count_lo);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SOC_COMMON_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "soc_drop_count = %llu\n\n",
+ drop_count);
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_per_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_per_rate_stats_tlv *stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 ru_size_cnt = 0;
+ u32 rc_mode, ru_type;
+ u8 *buf = stats_req->buf, i;
+ const char *mode_prefix;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ rc_mode = le32_to_cpu(stats_buf->rc_mode);
+ ru_type = le32_to_cpu(stats_buf->ru_type);
+
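+	/*
+	 * rc_mode selects which PER flavor this TLV reports; the OFDMA
+	 * modes additionally carry per-RU-size counters whose length
+	 * depends on ru_type.
+	 */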
+ switch (rc_mode) {
+ case ATH12K_HTT_STATS_RC_MODE_DLSU:
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PER_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_SU:\n");
+ mode_prefix = "su";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_DLMUMIMO:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_MUMIMO:\n");
+ mode_prefix = "mu";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_DLOFDMA:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_OFDMA:\n");
+ mode_prefix = "ofdma";
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_ULMUMIMO:
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PER_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_MUMIMO:\n");
+ mode_prefix = "ulmu";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_ULOFDMA:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_OFDMA:\n");
+ mode_prefix = "ulofdma";
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+ break;
+ default:
+ return;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per BW:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
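+	/* per_bw320 holds the 320 MHz counters and prints as one extra index after the per_bw array. */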
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.ppdus_tried));
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.ppdus_ack_failed));
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.mpdus_tried));
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.mpdus_failed));
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per NSS:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per MCS:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if ((rc_mode == ATH12K_HTT_STATS_RC_MODE_DLOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) &&
+ ru_type != ATH12K_HTT_STATS_RU_TYPE_INVALID) {
+ len += scnprintf(buf + len, buf_len - len, "\nPER per RU:\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+ len += scnprintf(buf + len, buf_len - len,
+ "non_data_ppdus_%s = ", mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len,
+ "ppdus_ack_failed_%s = ", mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+ }
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_DLMUMIMO) {
+ len += scnprintf(buf + len, buf_len - len, "\nlast_probed_bw = %u\n",
+ le32_to_cpu(stats_buf->last_probed_bw));
+ len += scnprintf(buf + len, buf_len - len, "last_probed_nss = %u\n",
+ le32_to_cpu(stats_buf->last_probed_nss));
+ len += scnprintf(buf + len, buf_len - len, "last_probed_mcs = %u\n",
+ le32_to_cpu(stats_buf->last_probed_mcs));
+ len += print_array_to_buf(buf, len, "MU Probe count per RC MODE",
+ stats_buf->probe_cnt,
+ ATH12K_HTT_RC_MODE_2D_COUNT, "\n\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_ast_entry_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_ast_entry_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_addr_l32;
+ u32 mac_addr_h16;
+ u32 ast_info;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_addr_l32 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_l32);
+ mac_addr_h16 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_h16);
+ ast_info = le32_to_cpu(htt_stats_buf->info);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_AST_ENTRY_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ast_index = %u\n",
+ le32_to_cpu(htt_stats_buf->ast_index));
+ len += scnprintf(buf + len, buf_len - len,
+ "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_0),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_1),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_2),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_3),
+ u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_0),
+ u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_1));
+
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ le32_to_cpu(htt_stats_buf->sw_peer_id));
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_PDEV_ID_INFO));
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_VDEV_ID_INFO));
+ len += scnprintf(buf + len, buf_len - len, "next_hop = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_NEXT_HOP_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mcast = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MCAST_INFO));
+ len += scnprintf(buf + len, buf_len - len, "monitor_direct = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MONITOR_DIRECT_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mesh_sta = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MESH_STA_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mec = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MEC_INFO));
+ len += scnprintf(buf + len, buf_len - len, "intra_bss = %u\n\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_INTRA_BSS_INFO));
+
+ stats_req->buf_len = len;
+}
+
+static const char *
+ath12k_htt_get_punct_dir_type_str(enum ath12k_htt_stats_direction direction)
+{
+ switch (direction) {
+ case ATH12K_HTT_STATS_DIRECTION_TX:
+ return "tx";
+ case ATH12K_HTT_STATS_DIRECTION_RX:
+ return "rx";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *
+ath12k_htt_get_punct_ppdu_type_str(enum ath12k_htt_stats_ppdu_type ppdu_type)
+{
+ switch (ppdu_type) {
+ case ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU:
+ return "su";
+ case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO:
+ return "dl_mu_mimo";
+ case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO:
+ return "ul_mu_mimo";
+ case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA:
+ return "dl_mu_ofdma";
+ case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA:
+ return "ul_mu_ofdma";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *
+ath12k_htt_get_punct_pream_type_str(enum ath12k_htt_stats_param_type pream_type)
+{
+ switch (pream_type) {
+ case ATH12K_HTT_STATS_PREAM_OFDM:
+ return "ofdm";
+ case ATH12K_HTT_STATS_PREAM_CCK:
+ return "cck";
+ case ATH12K_HTT_STATS_PREAM_HT:
+ return "ht";
+ case ATH12K_HTT_STATS_PREAM_VHT:
+ return "ac";
+ case ATH12K_HTT_STATS_PREAM_HE:
+ return "ax";
+ case ATH12K_HTT_STATS_PREAM_EHT:
+ return "be";
+ default:
+ return "unknown";
+ }
+}
+
+static void
+ath12k_htt_print_puncture_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_puncture_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ const char *direction;
+ const char *ppdu_type;
+ const char *preamble;
+ u32 mac_id__word;
+ u32 subband_limit;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ mac_id__word = le32_to_cpu(stats_buf->mac_id__word);
+ subband_limit = min(le32_to_cpu(stats_buf->subband_cnt),
+ ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT);
+
+ direction = ath12k_htt_get_punct_dir_type_str(le32_to_cpu(stats_buf->direction));
+ ppdu_type = ath12k_htt_get_punct_ppdu_type_str(le32_to_cpu(stats_buf->ppdu_type));
+ preamble = ath12k_htt_get_punct_pream_type_str(le32_to_cpu(stats_buf->preamble));
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_PUNCTURE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id__word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_%s_%s_last_used_pattern_mask = 0x%08x\n",
+ direction, preamble, ppdu_type,
+ le32_to_cpu(stats_buf->last_used_pattern_mask));
+
+ for (i = 0; i < subband_limit; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_%s_%s_num_subbands_used_cnt_%02d = %u\n",
+ direction, preamble, ppdu_type, i + 1,
+ le32_to_cpu(stats_buf->num_subbands_used_cnt[i]));
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_dmac_reset_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u64 time;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_DMAC_RESET_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "reset_count = %u\n",
+ le32_to_cpu(htt_stats_buf->reset_count));
+ time = ath12k_le32hilo_to_u64(htt_stats_buf->reset_time_hi_ms,
+ htt_stats_buf->reset_time_lo_ms);
+ len += scnprintf(buf + len, buf_len - len, "reset_time_ms = %llu\n", time);
+ time = ath12k_le32hilo_to_u64(htt_stats_buf->disengage_time_hi_ms,
+ htt_stats_buf->disengage_time_lo_ms);
+ len += scnprintf(buf + len, buf_len - len, "disengage_time_ms = %llu\n", time);
+
+ time = ath12k_le32hilo_to_u64(htt_stats_buf->engage_time_hi_ms,
+ htt_stats_buf->engage_time_lo_ms);
+ len += scnprintf(buf + len, buf_len - len, "engage_time_ms = %llu\n", time);
+
+ len += scnprintf(buf + len, buf_len - len, "disengage_count = %u\n",
+ le32_to_cpu(htt_stats_buf->disengage_count));
+ len += scnprintf(buf + len, buf_len - len, "engage_count = %u\n",
+ le32_to_cpu(htt_stats_buf->engage_count));
+ len += scnprintf(buf + len, buf_len - len, "drain_dest_ring_mask = 0x%x\n\n",
+ le32_to_cpu(htt_stats_buf->drain_dest_ring_mask));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_sched_algo_ofdma_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_SCHED_ALGO_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += print_array_to_buf(buf, len, "rate_based_dlofdma_enabled_count",
+ htt_stats_buf->rate_based_dlofdma_enabled_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "rate_based_dlofdma_disabled_count",
+ htt_stats_buf->rate_based_dlofdma_disabled_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "rate_based_dlofdma_probing_count",
+ htt_stats_buf->rate_based_dlofdma_probing_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "rate_based_dlofdma_monitoring_count",
+ htt_stats_buf->rate_based_dlofdma_monitor_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_enabled_count",
+ htt_stats_buf->chan_acc_lat_based_dlofdma_enabled_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_disabled_count",
+ htt_stats_buf->chan_acc_lat_based_dlofdma_disabled_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_monitoring_count",
+ htt_stats_buf->chan_acc_lat_based_dlofdma_monitor_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "downgrade_to_dl_su_ru_alloc_fail",
+ htt_stats_buf->downgrade_to_dl_su_ru_alloc_fail,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "candidate_list_single_user_disable_ofdma",
+ htt_stats_buf->candidate_list_single_user_disable_ofdma,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "dl_cand_list_dropped_high_ul_qos_weight",
+ htt_stats_buf->dl_cand_list_dropped_high_ul_qos_weight,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "ax_dlofdma_disabled_due_to_pipelining",
+ htt_stats_buf->ax_dlofdma_disabled_due_to_pipelining,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "dlofdma_disabled_su_only_eligible",
+ htt_stats_buf->dlofdma_disabled_su_only_eligible,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "dlofdma_disabled_consec_no_mpdus_tried",
+ htt_stats_buf->dlofdma_disabled_consec_no_mpdus_tried,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "dlofdma_disabled_consec_no_mpdus_success",
+ htt_stats_buf->dlofdma_disabled_consec_no_mpdus_success,
+ ATH12K_HTT_NUM_AC_WMM, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+ u8 i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_RATE_STATS_BE_OFDMA_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "be_ofdma_tx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->be_ofdma_tx_ldpc));
+ len += print_array_to_buf(buf, len, "be_ofdma_tx_mcs",
+ htt_stats_buf->be_ofdma_tx_mcs,
+ ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS, "\n");
+ len += print_array_to_buf(buf, len, "be_ofdma_eht_sig_mcs",
+ htt_stats_buf->be_ofdma_eht_sig_mcs,
+ ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS, "\n");
+ len += scnprintf(buf + len, buf_len - len, "be_ofdma_tx_ru_size = ");
+ for (i = 0; i < ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_htt_be_tx_rx_ru_size_to_str(i),
+ le32_to_cpu(htt_stats_buf->be_ofdma_tx_ru_size[i]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += print_array_to_buf_index(buf, len, "be_ofdma_tx_nss = ", 1,
+ htt_stats_buf->be_ofdma_tx_nss,
+ ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf(buf, len, "be_ofdma_tx_bw",
+ htt_stats_buf->be_ofdma_tx_bw,
+ ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "be_ofdma_tx_gi[%u]", i);
+ len += print_array_to_buf(buf, len, "", htt_stats_buf->gi[i],
+ ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS, "\n");
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_MBSSID_CTRL_FRAME_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "basic_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->basic_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "basic_trigger_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->basic_trigger_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "bsr_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->bsr_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "bsr_trigger_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->bsr_trigger_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "mu_rts_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_rts_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "mu_rts_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_rts_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_trigger_within_bss = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_within_bss));
+
+ stats_req->buf_len = len;
+}
+
+static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
+ u16 tag, u16 len, const void *tag_buf,
+ void *user_data)
+{
+ struct debug_htt_stats_req *stats_req = user_data;
+
+ switch (tag) {
+ case HTT_STATS_TX_PDEV_CMN_TAG:
+ htt_print_tx_pdev_stats_cmn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
+ htt_print_tx_pdev_stats_urrn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_TAG:
+ htt_print_tx_pdev_stats_sifs_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_FLUSH_TAG:
+ htt_print_tx_pdev_stats_flush_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
+ htt_print_tx_pdev_stats_sifs_hist_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG:
+ htt_print_pdev_ctrl_path_tx_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_MU_PPDU_DIST_TAG:
+ htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SCHED_CMN_TAG:
+ ath12k_htt_print_stats_tx_sched_cmn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
+ ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
+ ath12k_htt_print_sched_txq_cmd_posted_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
+ ath12k_htt_print_sched_txq_cmd_reaped_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
+ ath12k_htt_print_sched_txq_sched_order_su_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
+ ath12k_htt_print_sched_txq_sched_ineligibility_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG:
+ ath12k_htt_print_sched_txq_supercycle_trigger_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_HW_PDEV_ERRS_TAG:
+ ath12k_htt_print_hw_stats_pdev_errs_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_HW_INTR_MISC_TAG:
+ ath12k_htt_print_hw_stats_intr_misc_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_WHAL_TX_TAG:
+ ath12k_htt_print_hw_stats_whal_tx_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_HW_WAR_TAG:
+ ath12k_htt_print_hw_war_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_CMN_TAG:
+ ath12k_htt_print_tx_tqm_cmn_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
+ ath12k_htt_print_tx_tqm_error_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
+ ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
+ ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
+ ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_PDEV_TAG:
+ ath12k_htt_print_tx_tqm_pdev_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_CMN_TAG:
+ ath12k_htt_print_tx_de_cmn_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
+ ath12k_htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
+ ath12k_htt_print_tx_de_classify_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
+ ath12k_htt_print_tx_de_classify_failed_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
+ ath12k_htt_print_tx_de_classify_status_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
+ ath12k_htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
+ ath12k_htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_COMPL_STATS_TAG:
+ ath12k_htt_print_tx_de_compl_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG:
+ ath12k_htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SELFGEN_AC_STATS_TAG:
+ ath12k_htt_print_tx_selfgen_ac_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SELFGEN_AX_STATS_TAG:
+ ath12k_htt_print_tx_selfgen_ax_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SELFGEN_BE_STATS_TAG:
+ ath12k_htt_print_tx_selfgen_be_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG:
+ ath12k_htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG:
+ ath12k_htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG:
+ ath12k_htt_print_tx_selfgen_be_err_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG:
+ ath12k_htt_print_tx_selfgen_ac_sched_status_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG:
+ ath12k_htt_print_tx_selfgen_ax_sched_status_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG:
+ ath12k_htt_print_tx_selfgen_be_sched_status_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_STRING_TAG:
+ ath12k_htt_print_stats_string_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SRING_STATS_TAG:
+ ath12k_htt_print_sring_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SFM_CMN_TAG:
+ ath12k_htt_print_sfm_cmn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SFM_CLIENT_TAG:
+ ath12k_htt_print_sfm_client_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SFM_CLIENT_USER_TAG:
+ ath12k_htt_print_sfm_client_user_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG:
+ ath12k_htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG:
+ ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_MPDU_STATS_TAG:
+ ath12k_htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG:
+ ath12k_htt_print_pdev_cca_stats_hist_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
+ ath12k_htt_print_pdev_stats_cca_counters_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_OBSS_PD_TAG:
+ ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
+ ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG:
+ ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_DLPAGER_STATS_TAG:
+ ath12k_htt_print_dlpager_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_STATS_TAG:
+ ath12k_htt_print_phy_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_COUNTERS_TAG:
+ ath12k_htt_print_phy_counters_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_RESET_STATS_TAG:
+ ath12k_htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_RESET_COUNTERS_TAG:
+ ath12k_htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_TPC_STATS_TAG:
+ ath12k_htt_print_phy_tpc_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
+ ath12k_htt_print_soc_txrx_stats_common_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PER_RATE_STATS_TAG:
+ ath12k_htt_print_tx_per_rate_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_AST_ENTRY_TAG:
+ ath12k_htt_print_ast_entry_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_PUNCTURE_STATS_TAG:
+ ath12k_htt_print_puncture_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_DMAC_RESET_STATS_TAG:
+ ath12k_htt_print_dmac_reset_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG:
+ ath12k_htt_print_pdev_sched_algo_ofdma_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG:
+ ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG:
+ ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_htt_extd_stats_msg *msg;
+ struct debug_htt_stats_req *stats_req;
+ struct ath12k *ar;
+ u32 len, pdev_id, stats_info;
+ u64 cookie;
+ int ret;
+ bool send_completion = false;
+
+ msg = (struct ath12k_htt_extd_stats_msg *)skb->data;
+ cookie = le64_to_cpu(msg->cookie);
+
+ if (u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_MSB) !=
+ ATH12K_HTT_STATS_MAGIC_VALUE) {
+ ath12k_warn(ab, "received invalid htt ext stats event\n");
+ return;
+ }
+
+ pdev_id = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_LSB);
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
+ goto exit;
+ }
+
+ stats_req = ar->debug.htt_stats.stats_req;
+ if (!stats_req)
+ goto exit;
+
+ spin_lock_bh(&ar->data_lock);
+
+ stats_info = le32_to_cpu(msg->info1);
+ stats_req->done = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE);
+ if (stats_req->done)
+ send_completion = true;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ len = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH);
+ if (len > skb->len) {
+ ath12k_warn(ab, "invalid length %d for HTT stats", len);
+ goto exit;
+ }
+
+ ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath12k_dbg_htt_ext_stats_parse,
+ stats_req);
+ if (ret)
+ ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
+
+ if (send_completion)
+ complete(&stats_req->htt_stats_rcvd);
+exit:
+ rcu_read_unlock();
+}
+
+static ssize_t ath12k_read_htt_stats_type(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ char buf[32];
+ size_t len;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+ type = ar->debug.htt_stats.type;
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", type);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath12k_write_htt_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ unsigned int cfg_param[4] = {0};
+ const int size = 32;
+ int num_args;
+
+ char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, user_buf, min_t(size_t, count, size - 1)))
+ return -EFAULT;
+
+ num_args = sscanf(buf, "%u %u %u %u %u\n", &type, &cfg_param[0],
+ &cfg_param[1], &cfg_param[2], &cfg_param[3]);
+ if (num_args < 1 || num_args > 5)
+ return -EINVAL;
+
+ if (type == ATH12K_DBG_HTT_EXT_STATS_RESET ||
+ type >= ATH12K_DBG_HTT_NUM_EXT_STATS)
+ return -EINVAL;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
+ ar->debug.htt_stats.type = type;
+ ar->debug.htt_stats.cfg_param[0] = cfg_param[0];
+ ar->debug.htt_stats.cfg_param[1] = cfg_param[1];
+ ar->debug.htt_stats.cfg_param[2] = cfg_param[2];
+ ar->debug.htt_stats.cfg_param[3] = cfg_param[3];
+
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+
+ return count;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+ .read = ath12k_read_htt_stats_type,
+ .write = ath12k_write_htt_stats_type,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath12k_debugfs_htt_stats_req(struct ath12k *ar)
+{
+ struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+ enum ath12k_dbg_htt_ext_stats_type type = stats_req->type;
+ u64 cookie;
+ int ret, pdev_id;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ init_completion(&stats_req->htt_stats_rcvd);
+
+ pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ stats_req->done = false;
+ stats_req->pdev_id = pdev_id;
+
+ cookie = u64_encode_bits(ATH12K_HTT_STATS_MAGIC_VALUE,
+ ATH12K_HTT_STATS_COOKIE_MSB);
+ cookie |= u64_encode_bits(pdev_id, ATH12K_HTT_STATS_COOKIE_LSB);
+
+ if (stats_req->override_cfg_param) {
+ cfg_params.cfg0 = stats_req->cfg_param[0];
+ cfg_params.cfg1 = stats_req->cfg_param[1];
+ cfg_params.cfg2 = stats_req->cfg_param[2];
+ cfg_params.cfg3 = stats_req->cfg_param[3];
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+ if (!wait_for_completion_timeout(&stats_req->htt_stats_rcvd, 3 * HZ)) {
+ spin_lock_bh(&ar->data_lock);
+ if (!stats_req->done) {
+ stats_req->done = true;
+ spin_unlock_bh(&ar->data_lock);
+ ath12k_warn(ar->ab, "stats request timed out\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return 0;
+}
+
+static int ath12k_open_htt_stats(struct inode *inode,
+ struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct debug_htt_stats_req *stats_req;
+ enum ath12k_dbg_htt_ext_stats_type type = ar->debug.htt_stats.type;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ if (type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ah->state != ATH12K_HW_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ if (ar->debug.htt_stats.stats_req) {
+ ret = -EAGAIN;
+ goto err_unlock;
+ }
+
+ stats_req = kzalloc(sizeof(*stats_req) + ATH12K_HTT_STATS_BUF_SIZE, GFP_KERNEL);
+ if (!stats_req) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+ stats_req->cfg_param[0] = ar->debug.htt_stats.cfg_param[0];
+ stats_req->cfg_param[1] = ar->debug.htt_stats.cfg_param[1];
+ stats_req->cfg_param[2] = ar->debug.htt_stats.cfg_param[2];
+ stats_req->cfg_param[3] = ar->debug.htt_stats.cfg_param[3];
+ stats_req->override_cfg_param = !!stats_req->cfg_param[0] ||
+ !!stats_req->cfg_param[1] ||
+ !!stats_req->cfg_param[2] ||
+ !!stats_req->cfg_param[3];
+
+ ret = ath12k_debugfs_htt_stats_req(ar);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+
+ return 0;
+out:
+ kfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
+err_unlock:
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+
+ return ret;
+}
+
+static int ath12k_release_htt_stats(struct inode *inode,
+ struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+ kfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_htt_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH12K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_dump_htt_stats = {
+ .open = ath12k_open_htt_stats,
+ .release = ath12k_release_htt_stats,
+ .read = ath12k_read_htt_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath12k_write_htt_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ u8 param_pos;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH12K_DBG_HTT_NUM_EXT_STATS ||
+ type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+ return -E2BIG;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+ cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
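+ /* Each cfg word carries a 32-entry slice of the reset bitmap: stat
+  * types 0-31 map to cfg1, 32-63 to cfg2 and 64-95 to cfg3.
+  */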
+ param_pos = (type >> 5) + 1;
+
+ switch (param_pos) {
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES:
+ cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
+ break;
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES:
+ cfg_params.cfg2 = ATH12K_HTT_STATS_RESET_BITMAP32_BIT(cfg_params.cfg0 +
+ type);
+ break;
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES:
+ cfg_params.cfg3 = ATH12K_HTT_STATS_RESET_BITMAP64_BIT(cfg_params.cfg0 +
+ type);
+ break;
+ default:
+ break;
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH12K_DBG_HTT_EXT_STATS_RESET,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+ return ret;
+ }
+
+ ar->debug.htt_stats.reset = type;
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+
+ return count;
+}
+
+static const struct file_operations fops_htt_stats_reset = {
+ .write = ath12k_write_htt_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_htt_stats_register(struct ath12k *ar)
+{
+ debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_type);
+ debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
+ ar, &fops_dump_htt_stats);
+ debugfs_create_file("htt_stats_reset", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_reset);
+}
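+
+/* Usage sketch (illustrative paths; the three files above are created in
+ * the per-pdev debugfs directory, e.g. /sys/kernel/debug/ath12k/<dev>/mac0):
+ *
+ *	echo 1 > htt_stats_type    # select ATH12K_DBG_HTT_EXT_STATS_PDEV_TX
+ *	cat htt_stats              # request the stats and dump the TLVs
+ *	echo 1 > htt_stats_reset   # ask firmware to reset that stat type
+ */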
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
new file mode 100644
index 000000000000..4b59976fbc35
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -0,0 +1,1494 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define ATH12K_HTT_STATS_BUF_SIZE (1024 * 512)
+#define ATH12K_HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0)
+#define ATH12K_HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32)
+#define ATH12K_HTT_STATS_MAGIC_VALUE 0xF0F0F0F0
+#define ATH12K_HTT_STATS_SUBTYPE_MAX 16
+#define ATH12K_HTT_MAX_STRING_LEN 256
+
+#define ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx) ((_idx) & 0x1f)
+#define ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx) ((_idx) & 0x3f)
+#define ATH12K_HTT_STATS_RESET_BITMAP32_BIT(_idx) (1 << \
+ ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx))
+#define ATH12K_HTT_STATS_RESET_BITMAP64_BIT(_idx) (1ULL << \
+ ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx))
+
+void ath12k_debugfs_htt_stats_register(struct ath12k *ar);
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb);
+#else /* CONFIG_ATH12K_DEBUGFS */
+static inline void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+}
+#endif
+
+/**
+ * DOC: target -> host extended statistics upload
+ *
+ * The following field definitions describe the format of the HTT
+ * target to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for. A single stats request can span multiple HTT stats indications
+ * due to the HTT message size limitation, so every HTT ext stats
+ * indication carries tag-length-value stats information elements.
+ * The tag-length header of each HTT stats IND message also includes a
+ * status field, indicating whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A Done bit value of 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31 16|15 12|11|10 8|7 5|4 0|
+ * |--------------------------------------------------------------|
+ * | reserved | msg type |
+ * |--------------------------------------------------------------|
+ * | cookie LSBs |
+ * |--------------------------------------------------------------|
+ * | cookie MSBs |
+ * |--------------------------------------------------------------|
+ * | stats entry length | rsvd | D| S | stat type |
+ * |--------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see debugfs_htt_stats.h) |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as an extended statistics upload confirmation
+ * message.
+ * Value: 0x1c
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 7:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: ath12k_dbg_htt_ext_stats_type
+ * - STATUS
+ * Bits 10:8
+ * Purpose: indicate whether the requested stats are present
+ * Value:
+ * 0 -> The requested stats have been delivered in full
+ * 1 -> The requested stats have been delivered in part
+ * 2 -> The requested stats could not be delivered (error case)
+ * 3 -> The requested stat type is not recognized (invalid)
+ * - DONE
+ * Bit 11
+ * Purpose:
+ * Indicates the completion of the stats entry; this will be the last
+ * stats conf HTT segment for the requested stats type.
+ * Value:
+ * 0 -> the stats retrieval is ongoing
+ * 1 -> the stats retrieval is complete
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes.
+ */
+
+#define ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE BIT(11)
+#define ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16)
+
+struct ath12k_htt_extd_stats_msg {
+ __le32 info0;
+ __le64 cookie;
+ __le32 info1;
+ u8 data[];
+} __packed;
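+
+/*
+ * Illustrative decode of one confirmation message (a documentation-only
+ * sketch that assumes just the struct and masks above, not additional
+ * driver API):
+ *
+ *	struct ath12k_htt_extd_stats_msg *msg = (void *)skb->data;
+ *	u64 cookie = le64_to_cpu(msg->cookie);
+ *	u32 info1 = le32_to_cpu(msg->info1);
+ *	u32 pdev_id = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_LSB);
+ *	bool done = u32_get_bits(info1, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE);
+ *	u32 tlv_len = u32_get_bits(info1, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH);
+ *
+ * msg->data then carries tlv_len bytes of TLV-encoded stats elements, and
+ * done == true marks the final segment for the request.
+ */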
+
+/* htt_dbg_ext_stats_type */
+enum ath12k_dbg_htt_ext_stats_type {
+ ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
+ ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
+ ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
+ ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31,
+ ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
+ ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
+ ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
+ ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
+ ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
+ ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41,
+ ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
+ ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
+ ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
+ ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
+
+ /* keep this last */
+ ATH12K_DBG_HTT_NUM_EXT_STATS,
+};
+
+enum ath12k_dbg_htt_tlv_tag {
+ HTT_STATS_TX_PDEV_CMN_TAG = 0,
+ HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
+ HTT_STATS_TX_PDEV_SIFS_TAG = 2,
+ HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
+ HTT_STATS_STRING_TAG = 5,
+ HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
+ HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
+ HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
+ HTT_STATS_TX_TQM_CMN_TAG = 14,
+ HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_DE_EAPOL_PACKETS_TAG = 17,
+ HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG = 18,
+ HTT_STATS_TX_DE_CLASSIFY_STATS_TAG = 19,
+ HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG = 20,
+ HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21,
+ HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22,
+ HTT_STATS_TX_DE_CMN_TAG = 23,
+ HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
+ HTT_STATS_SFM_CMN_TAG = 26,
+ HTT_STATS_SRING_STATS_TAG = 27,
+ HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
+ HTT_STATS_TX_SCHED_CMN_TAG = 37,
+ HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
+ HTT_STATS_SFM_CLIENT_USER_TAG = 41,
+ HTT_STATS_SFM_CLIENT_TAG = 42,
+ HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43,
+ HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
+ HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG = 46,
+ HTT_STATS_TX_SELFGEN_CMN_STATS_TAG = 47,
+ HTT_STATS_TX_SELFGEN_AC_STATS_TAG = 48,
+ HTT_STATS_TX_SELFGEN_AX_STATS_TAG = 49,
+ HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG = 50,
+ HTT_STATS_HW_INTR_MISC_TAG = 54,
+ HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_TX_DE_COMPL_STATS_TAG = 65,
+ HTT_STATS_WHAL_TX_TAG = 66,
+ HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
+ HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70,
+ HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71,
+ HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72,
+ HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73,
+ HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74,
+ HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
+ HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
+ HTT_STATS_PDEV_OBSS_PD_TAG = 88,
+ HTT_STATS_HW_WAR_TAG = 89,
+ HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
+ HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
+ HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
+ HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111,
+ HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112,
+ HTT_STATS_DLPAGER_STATS_TAG = 120,
+ HTT_STATS_PHY_COUNTERS_TAG = 121,
+ HTT_STATS_PHY_STATS_TAG = 122,
+ HTT_STATS_PHY_RESET_COUNTERS_TAG = 123,
+ HTT_STATS_PHY_RESET_STATS_TAG = 124,
+ HTT_STATS_SOC_TXRX_STATS_COMMON_TAG = 125,
+ HTT_STATS_PER_RATE_STATS_TAG = 128,
+ HTT_STATS_MU_PPDU_DIST_TAG = 129,
+ HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG = 130,
+ HTT_STATS_AST_ENTRY_TAG = 132,
+ HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG = 135,
+ HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG = 137,
+ HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138,
+ HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139,
+ HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG = 147,
+ HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG = 148,
+ HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG = 149,
+ HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG = 150,
+ HTT_STATS_DMAC_RESET_STATS_TAG = 155,
+ HTT_STATS_PHY_TPC_STATS_TAG = 157,
+ HTT_STATS_PDEV_PUNCTURE_STATS_TAG = 158,
+ HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165,
+ HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG = 172,
+ HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG = 176,
+
+ HTT_STATS_MAX_TAG,
+};
+
+#define ATH12K_HTT_STATS_MAC_ID GENMASK(7, 0)
+
+#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
+#define ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 150
+
+/* MU MIMO distribution stats form a 2-dimensional array whose first
+ * dimension selects the NR bin: nr4 (index 0) or nr8 (index 1).
+ */
+#define ATH12K_HTT_STATS_NUM_NR_BINS 2
+#define ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST 10
+#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10
+#define ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS 9
+#define ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS \
+ (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS)
+#define ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS \
+ (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST)
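+
+/* A flattened 2-D entry is thus addressed as, e.g. (illustrative indexing
+ * following the macro layout above):
+ *	num_seq_term_status[nr_bin * ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS + idx]
+ */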
+
+enum ath12k_htt_tx_pdev_underrun_enum {
+ HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2,
+ HTT_TX_PDEV_MAX_URRN_STATS = 3,
+};
+
+enum ath12k_htt_stats_reset_cfg_param_alloc_pos {
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES = 1,
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES,
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES,
+};
+
+struct debug_htt_stats_req {
+ bool done;
+ bool override_cfg_param;
+ u8 pdev_id;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ u32 cfg_param[4];
+ u8 peer_addr[ETH_ALEN];
+ struct completion htt_stats_rcvd;
+ u32 buf_len;
+ u8 buf[];
+};
+
+struct ath12k_htt_tx_pdev_stats_cmn_tlv {
+ __le32 mac_id__word;
+ __le32 hw_queued;
+ __le32 hw_reaped;
+ __le32 underrun;
+ __le32 hw_paused;
+ __le32 hw_flush;
+ __le32 hw_filt;
+ __le32 tx_abort;
+ __le32 mpdu_requed;
+ __le32 tx_xretry;
+ __le32 data_rc;
+ __le32 mpdu_dropped_xretry;
+ __le32 illgl_rate_phy_err;
+ __le32 cont_xretry;
+ __le32 tx_timeout;
+ __le32 pdev_resets;
+ __le32 phy_underrun;
+ __le32 txop_ovf;
+ __le32 seq_posted;
+ __le32 seq_failed_queueing;
+ __le32 seq_completed;
+ __le32 seq_restarted;
+ __le32 mu_seq_posted;
+ __le32 seq_switch_hw_paused;
+ __le32 next_seq_posted_dsr;
+ __le32 seq_posted_isr;
+ __le32 seq_ctrl_cached;
+ __le32 mpdu_count_tqm;
+ __le32 msdu_count_tqm;
+ __le32 mpdu_removed_tqm;
+ __le32 msdu_removed_tqm;
+ __le32 mpdus_sw_flush;
+ __le32 mpdus_hw_filter;
+ __le32 mpdus_truncated;
+ __le32 mpdus_ack_failed;
+ __le32 mpdus_expired;
+ __le32 mpdus_seq_hw_retry;
+ __le32 ack_tlv_proc;
+ __le32 coex_abort_mpdu_cnt_valid;
+ __le32 coex_abort_mpdu_cnt;
+ __le32 num_total_ppdus_tried_ota;
+ __le32 num_data_ppdus_tried_ota;
+ __le32 local_ctrl_mgmt_enqued;
+ __le32 local_ctrl_mgmt_freed;
+ __le32 local_data_enqued;
+ __le32 local_data_freed;
+ __le32 mpdu_tried;
+ __le32 isr_wait_seq_posted;
+
+ __le32 tx_active_dur_us_low;
+ __le32 tx_active_dur_us_high;
+ __le32 remove_mpdus_max_retries;
+ __le32 comp_delivered;
+ __le32 ppdu_ok;
+ __le32 self_triggers;
+ __le32 tx_time_dur_data;
+ __le32 seq_qdepth_repost_stop;
+ __le32 mu_seq_min_msdu_repost_stop;
+ __le32 seq_min_msdu_repost_stop;
+ __le32 seq_txop_repost_stop;
+ __le32 next_seq_cancel;
+ __le32 fes_offsets_err_cnt;
+ __le32 num_mu_peer_blacklisted;
+ __le32 mu_ofdma_seq_posted;
+ __le32 ul_mumimo_seq_posted;
+ __le32 ul_ofdma_seq_posted;
+
+ __le32 thermal_suspend_cnt;
+ __le32 dfs_suspend_cnt;
+ __le32 tx_abort_suspend_cnt;
+ __le32 tgt_specific_opaque_txq_suspend_info;
+ __le32 last_suspend_reason;
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_urrn_tlv {
+ DECLARE_FLEX_ARRAY(__le32, urrn_stats);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_flush_tlv {
+ DECLARE_FLEX_ARRAY(__le32, flush_errs);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_phy_err_tlv {
+ DECLARE_FLEX_ARRAY(__le32, phy_errs);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sifs_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sifs_status);
+} __packed;
+
+struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv {
+ __le32 fw_tx_mgmt_subtype[ATH12K_HTT_STATS_SUBTYPE_MAX];
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sifs_hist_status);
+} __packed;
+
+enum ath12k_htt_stats_hw_mode {
+ ATH12K_HTT_STATS_HWMODE_AC = 0,
+ ATH12K_HTT_STATS_HWMODE_AX = 1,
+ ATH12K_HTT_STATS_HWMODE_BE = 2,
+};
+
+struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv {
+ __le32 hw_mode;
+ __le32 num_seq_term_status[ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS];
+ __le32 num_ppdu_cmpl_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
+ __le32 num_seq_posted[ATH12K_HTT_STATS_NUM_NR_BINS];
+ __le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
+} __packed;
+
+#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
+#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
+
+#define ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20
+
+struct ath12k_htt_stats_tx_sched_cmn_tlv {
+ __le32 mac_id__word;
+ __le32 current_timestamp;
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv {
+ __le32 mac_id__word;
+ __le32 sched_policy;
+ __le32 last_sched_cmd_posted_timestamp;
+ __le32 last_sched_cmd_compl_timestamp;
+ __le32 sched_2_tac_lwm_count;
+ __le32 sched_2_tac_ring_full;
+ __le32 sched_cmd_post_failure;
+ __le32 num_active_tids;
+ __le32 num_ps_schedules;
+ __le32 sched_cmds_pending;
+ __le32 num_tid_register;
+ __le32 num_tid_unregister;
+ __le32 num_qstats_queried;
+ __le32 qstats_update_pending;
+ __le32 last_qstats_query_timestamp;
+ __le32 num_tqm_cmdq_full;
+ __le32 num_de_sched_algo_trigger;
+ __le32 num_rt_sched_algo_trigger;
+ __le32 num_tqm_sched_algo_trigger;
+ __le32 notify_sched;
+ __le32 dur_based_sendn_term;
+ __le32 su_notify2_sched;
+ __le32 su_optimal_queued_msdus_sched;
+ __le32 su_delay_timeout_sched;
+ __le32 su_min_txtime_sched_delay;
+ __le32 su_no_delay;
+ __le32 num_supercycles;
+ __le32 num_subcycles_with_sort;
+ __le32 num_subcycles_no_sort;
+} __packed;
+
+struct ath12k_htt_sched_txq_cmd_posted_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_cmd_posted);
+} __packed;
+
+struct ath12k_htt_sched_txq_cmd_reaped_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_cmd_reaped);
+} __packed;
+
+struct ath12k_htt_sched_txq_sched_order_su_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_order_su);
+} __packed;
+
+struct ath12k_htt_sched_txq_sched_ineligibility_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_ineligibility);
+} __packed;
+
+enum ath12k_htt_sched_txq_supercycle_triggers_tlv_enum {
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_NONE = 0,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_FORCED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_TIDQ_ENTRIES,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_ACTIVE_TIDS,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX_ITR_REACHED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_DUR_THRESHOLD_REACHED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_TWT_TRIGGER,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX,
+};
+
+struct ath12k_htt_sched_txq_supercycle_triggers_tlv {
+ DECLARE_FLEX_ARRAY(__le32, supercycle_triggers);
+} __packed;
+
+struct ath12k_htt_hw_stats_pdev_errs_tlv {
+ __le32 mac_id__word;
+ __le32 tx_abort;
+ __le32 tx_abort_fail_count;
+ __le32 rx_abort;
+ __le32 rx_abort_fail_count;
+ __le32 warm_reset;
+ __le32 cold_reset;
+ __le32 tx_flush;
+ __le32 tx_glb_reset;
+ __le32 tx_txq_reset;
+ __le32 rx_timeout_reset;
+ __le32 mac_cold_reset_restore_cal;
+ __le32 mac_cold_reset;
+ __le32 mac_warm_reset;
+ __le32 mac_only_reset;
+ __le32 phy_warm_reset;
+ __le32 phy_warm_reset_ucode_trig;
+ __le32 mac_warm_reset_restore_cal;
+ __le32 mac_sfm_reset;
+ __le32 phy_warm_reset_m3_ssr;
+ __le32 phy_warm_reset_reason_phy_m3;
+ __le32 phy_warm_reset_reason_tx_hw_stuck;
+ __le32 phy_warm_reset_reason_num_rx_frame_stuck;
+ __le32 phy_warm_reset_reason_wal_rx_rec_rx_busy;
+ __le32 phy_warm_reset_reason_wal_rx_rec_mac_hng;
+ __le32 phy_warm_reset_reason_mac_conv_phy_reset;
+ __le32 wal_rx_recovery_rst_mac_hang_cnt;
+ __le32 wal_rx_recovery_rst_known_sig_cnt;
+ __le32 wal_rx_recovery_rst_no_rx_cnt;
+ __le32 wal_rx_recovery_rst_no_rx_consec_cnt;
+ __le32 wal_rx_recovery_rst_rx_busy_cnt;
+ __le32 wal_rx_recovery_rst_phy_mac_hang_cnt;
+ __le32 rx_flush_cnt;
+ __le32 phy_warm_reset_reason_tx_exp_cca_stuck;
+ __le32 phy_warm_reset_reason_tx_consec_flsh_war;
+ __le32 phy_warm_reset_reason_tx_hwsch_reset_war;
+ __le32 phy_warm_reset_reason_hwsch_cca_wdog_war;
+ __le32 fw_rx_rings_reset;
+ __le32 rx_dest_drain_rx_descs_leak_prevented;
+ __le32 rx_dest_drain_rx_descs_saved_cnt;
+ __le32 rx_dest_drain_rxdma2reo_leak_detected;
+ __le32 rx_dest_drain_rxdma2fw_leak_detected;
+ __le32 rx_dest_drain_rxdma2wbm_leak_detected;
+ __le32 rx_dest_drain_rxdma1_2sw_leak_detected;
+ __le32 rx_dest_drain_rx_drain_ok_mac_idle;
+ __le32 rx_dest_drain_ok_mac_not_idle;
+ __le32 rx_dest_drain_prerequisite_invld;
+ __le32 rx_dest_drain_skip_non_lmac_reset;
+ __le32 rx_dest_drain_hw_fifo_notempty_post_wait;
+} __packed;
+
+#define ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN 8
+struct ath12k_htt_hw_stats_intr_misc_tlv {
+ u8 hw_intr_name[ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN];
+ __le32 mask;
+ __le32 count;
+} __packed;
+
+struct ath12k_htt_hw_stats_whal_tx_tlv {
+ __le32 mac_id__word;
+ __le32 last_unpause_ppdu_id;
+ __le32 hwsch_unpause_wait_tqm_write;
+ __le32 hwsch_dummy_tlv_skipped;
+ __le32 hwsch_misaligned_offset_received;
+ __le32 hwsch_reset_count;
+ __le32 hwsch_dev_reset_war;
+ __le32 hwsch_delayed_pause;
+ __le32 hwsch_long_delayed_pause;
+ __le32 sch_rx_ppdu_no_response;
+ __le32 sch_selfgen_response;
+ __le32 sch_rx_sifs_resp_trigger;
+} __packed;
+
+struct ath12k_htt_hw_war_stats_tlv {
+ __le32 mac_id__word;
+ DECLARE_FLEX_ARRAY(__le32, hw_wars);
+} __packed;
+
+struct ath12k_htt_tx_tqm_cmn_stats_tlv {
+ __le32 mac_id__word;
+ __le32 max_cmdq_id;
+ __le32 list_mpdu_cnt_hist_intvl;
+ __le32 add_msdu;
+ __le32 q_empty;
+ __le32 q_not_empty;
+ __le32 drop_notification;
+ __le32 desc_threshold;
+ __le32 hwsch_tqm_invalid_status;
+ __le32 missed_tqm_gen_mpdus;
+ __le32 tqm_active_tids;
+ __le32 tqm_inactive_tids;
+ __le32 tqm_active_msduq_flows;
+ __le32 msduq_timestamp_updates;
+ __le32 msduq_updates_mpdu_head_info_cmd;
+ __le32 msduq_updates_emp_to_nonemp_status;
+ __le32 get_mpdu_head_info_cmds_by_query;
+ __le32 get_mpdu_head_info_cmds_by_tac;
+ __le32 gen_mpdu_cmds_by_query;
+ __le32 high_prio_q_not_empty;
+} __packed;
+
+struct ath12k_htt_tx_tqm_error_stats_tlv {
+ __le32 q_empty_failure;
+ __le32 q_not_empty_failure;
+ __le32 add_msdu_failure;
+ __le32 tqm_cache_ctl_err;
+ __le32 tqm_soft_reset;
+ __le32 tqm_reset_num_in_use_link_descs;
+ __le32 tqm_reset_num_lost_link_descs;
+ __le32 tqm_reset_num_lost_host_tx_buf_cnt;
+ __le32 tqm_reset_num_in_use_internal_tqm;
+ __le32 tqm_reset_num_in_use_idle_link_rng;
+ __le32 tqm_reset_time_to_tqm_hang_delta_ms;
+ __le32 tqm_reset_recovery_time_ms;
+ __le32 tqm_reset_num_peers_hdl;
+ __le32 tqm_reset_cumm_dirty_hw_mpduq_cnt;
+ __le32 tqm_reset_cumm_dirty_hw_msduq_proc;
+ __le32 tqm_reset_flush_cache_cmd_su_cnt;
+ __le32 tqm_reset_flush_cache_cmd_other_cnt;
+ __le32 tqm_reset_flush_cache_cmd_trig_type;
+ __le32 tqm_reset_flush_cache_cmd_trig_cfg;
+ __le32 tqm_reset_flush_cmd_skp_status_null;
+} __packed;
+
+struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv {
+ DECLARE_FLEX_ARRAY(__le32, gen_mpdu_end_reason);
+} __packed;
+
+#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16
+#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16
+
+struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv {
+ DECLARE_FLEX_ARRAY(__le32, list_mpdu_end_reason);
+} __packed;
+
+struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv {
+ DECLARE_FLEX_ARRAY(__le32, list_mpdu_cnt_hist);
+} __packed;
+
+struct ath12k_htt_tx_tqm_pdev_stats_tlv {
+ __le32 msdu_count;
+ __le32 mpdu_count;
+ __le32 remove_msdu;
+ __le32 remove_mpdu;
+ __le32 remove_msdu_ttl;
+ __le32 send_bar;
+ __le32 bar_sync;
+ __le32 notify_mpdu;
+ __le32 sync_cmd;
+ __le32 write_cmd;
+ __le32 hwsch_trigger;
+ __le32 ack_tlv_proc;
+ __le32 gen_mpdu_cmd;
+ __le32 gen_list_cmd;
+ __le32 remove_mpdu_cmd;
+ __le32 remove_mpdu_tried_cmd;
+ __le32 mpdu_queue_stats_cmd;
+ __le32 mpdu_head_info_cmd;
+ __le32 msdu_flow_stats_cmd;
+ __le32 remove_msdu_cmd;
+ __le32 remove_msdu_ttl_cmd;
+ __le32 flush_cache_cmd;
+ __le32 update_mpduq_cmd;
+ __le32 enqueue;
+ __le32 enqueue_notify;
+ __le32 notify_mpdu_at_head;
+ __le32 notify_mpdu_state_valid;
+ __le32 sched_udp_notify1;
+ __le32 sched_udp_notify2;
+ __le32 sched_nonudp_notify1;
+ __le32 sched_nonudp_notify2;
+} __packed;
+
+struct ath12k_htt_tx_de_cmn_stats_tlv {
+ __le32 mac_id__word;
+ __le32 tcl2fw_entry_count;
+ __le32 not_to_fw;
+ __le32 invalid_pdev_vdev_peer;
+ __le32 tcl_res_invalid_addrx;
+ __le32 wbm2fw_entry_count;
+ __le32 invalid_pdev;
+ __le32 tcl_res_addrx_timeout;
+ __le32 invalid_vdev;
+ __le32 invalid_tcl_exp_frame_desc;
+ __le32 vdev_id_mismatch_cnt;
+} __packed;
+
+struct ath12k_htt_tx_de_eapol_packets_stats_tlv {
+ __le32 m1_packets;
+ __le32 m2_packets;
+ __le32 m3_packets;
+ __le32 m4_packets;
+ __le32 g1_packets;
+ __le32 g2_packets;
+ __le32 rc4_packets;
+ __le32 eap_packets;
+ __le32 eapol_start_packets;
+ __le32 eapol_logoff_packets;
+ __le32 eapol_encap_asf_packets;
+} __packed;
+
+struct ath12k_htt_tx_de_classify_stats_tlv {
+ __le32 arp_packets;
+ __le32 igmp_packets;
+ __le32 dhcp_packets;
+ __le32 host_inspected;
+ __le32 htt_included;
+ __le32 htt_valid_mcs;
+ __le32 htt_valid_nss;
+ __le32 htt_valid_preamble_type;
+ __le32 htt_valid_chainmask;
+ __le32 htt_valid_guard_interval;
+ __le32 htt_valid_retries;
+ __le32 htt_valid_bw_info;
+ __le32 htt_valid_power;
+ __le32 htt_valid_key_flags;
+ __le32 htt_valid_no_encryption;
+ __le32 fse_entry_count;
+ __le32 fse_priority_be;
+ __le32 fse_priority_high;
+ __le32 fse_priority_low;
+ __le32 fse_traffic_ptrn_be;
+ __le32 fse_traffic_ptrn_over_sub;
+ __le32 fse_traffic_ptrn_bursty;
+ __le32 fse_traffic_ptrn_interactive;
+ __le32 fse_traffic_ptrn_periodic;
+ __le32 fse_hwqueue_alloc;
+ __le32 fse_hwqueue_created;
+ __le32 fse_hwqueue_send_to_host;
+ __le32 mcast_entry;
+ __le32 bcast_entry;
+ __le32 htt_update_peer_cache;
+ __le32 htt_learning_frame;
+ __le32 fse_invalid_peer;
+ __le32 mec_notify;
+} __packed;
+
+struct ath12k_htt_tx_de_classify_failed_stats_tlv {
+ __le32 ap_bss_peer_not_found;
+ __le32 ap_bcast_mcast_no_peer;
+ __le32 sta_delete_in_progress;
+ __le32 ibss_no_bss_peer;
+ __le32 invalid_vdev_type;
+ __le32 invalid_ast_peer_entry;
+ __le32 peer_entry_invalid;
+ __le32 ethertype_not_ip;
+ __le32 eapol_lookup_failed;
+ __le32 qpeer_not_allow_data;
+ __le32 fse_tid_override;
+ __le32 ipv6_jumbogram_zero_length;
+ __le32 qos_to_non_qos_in_prog;
+ __le32 ap_bcast_mcast_eapol;
+ __le32 unicast_on_ap_bss_peer;
+ __le32 ap_vdev_invalid;
+ __le32 incomplete_llc;
+ __le32 eapol_duplicate_m3;
+ __le32 eapol_duplicate_m4;
+} __packed;
+
+struct ath12k_htt_tx_de_classify_status_stats_tlv {
+ __le32 eok;
+ __le32 classify_done;
+ __le32 lookup_failed;
+ __le32 send_host_dhcp;
+ __le32 send_host_mcast;
+ __le32 send_host_unknown_dest;
+ __le32 send_host;
+ __le32 status_invalid;
+} __packed;
+
+struct ath12k_htt_tx_de_enqueue_packets_stats_tlv {
+ __le32 enqueued_pkts;
+ __le32 to_tqm;
+ __le32 to_tqm_bypass;
+} __packed;
+
+struct ath12k_htt_tx_de_enqueue_discard_stats_tlv {
+ __le32 discarded_pkts;
+ __le32 local_frames;
+ __le32 is_ext_msdu;
+} __packed;
+
+struct ath12k_htt_tx_de_compl_stats_tlv {
+ __le32 tcl_dummy_frame;
+ __le32 tqm_dummy_frame;
+ __le32 tqm_notify_frame;
+ __le32 fw2wbm_enq;
+ __le32 tqm_bypass_frame;
+} __packed;
+
+enum ath12k_htt_tx_mumimo_grp_invalid_reason_code_stats {
+ ATH12K_HTT_TX_MUMIMO_GRP_VALID,
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID_NUM_MU_USERS_EXCEEDED_MU_MAX_USERS,
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID_SCHED_ALGO_NOT_MU_COMPATIBLE_GID,
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID_NON_PRIMARY_GRP,
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID_ZERO_CANDIDATES,
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MORE_CANDIDATES,
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_SIZE_EXCEED_NSS,
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_INELIGIBLE,
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID,
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_EFF_MU_TPUT_OMBPS,
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MAX_REASON_CODE,
+};
+
+#define ATH12K_HTT_NUM_AC_WMM 0x4
+#define ATH12K_HTT_MAX_NUM_SBT_INTR 4
+#define ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS 4
+#define ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS 8
+#define ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS 7
+#define ATH12K_HTT_TX_NUM_OFDMA_USER_STATS 74
+#define ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS 8
+#define ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ 8
+#define ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS 10
+
+#define ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE \
+ ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MAX_REASON_CODE
+#define ATH12K_HTT_TX_NUM_MUMIMO_GRP_INVALID_WORDS \
+ (ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ * ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE)
+
+struct ath12k_htt_tx_selfgen_cmn_stats_tlv {
+ __le32 mac_id__word;
+ __le32 su_bar;
+ __le32 rts;
+ __le32 cts2self;
+ __le32 qos_null;
+ __le32 delayed_bar_1;
+ __le32 delayed_bar_2;
+ __le32 delayed_bar_3;
+ __le32 delayed_bar_4;
+ __le32 delayed_bar_5;
+ __le32 delayed_bar_6;
+ __le32 delayed_bar_7;
+} __packed;
+
+struct ath12k_htt_tx_selfgen_ac_stats_tlv {
+ __le32 ac_su_ndpa;
+ __le32 ac_su_ndp;
+ __le32 ac_mu_mimo_ndpa;
+ __le32 ac_mu_mimo_ndp;
+ __le32 ac_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1];
+} __packed;
+
+struct ath12k_htt_tx_selfgen_ax_stats_tlv {
+ __le32 ax_su_ndpa;
+ __le32 ax_su_ndp;
+ __le32 ax_mu_mimo_ndpa;
+ __le32 ax_mu_mimo_ndp;
+ __le32 ax_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1];
+ __le32 ax_basic_trigger;
+ __le32 ax_bsr_trigger;
+ __le32 ax_mu_bar_trigger;
+ __le32 ax_mu_rts_trigger;
+ __le32 ax_ulmumimo_trigger;
+} __packed;
+
+struct ath12k_htt_tx_selfgen_be_stats_tlv {
+ __le32 be_su_ndpa;
+ __le32 be_su_ndp;
+ __le32 be_mu_mimo_ndpa;
+ __le32 be_mu_mimo_ndp;
+ __le32 be_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
+ __le32 be_basic_trigger;
+ __le32 be_bsr_trigger;
+ __le32 be_mu_bar_trigger;
+ __le32 be_mu_rts_trigger;
+ __le32 be_ulmumimo_trigger;
+ __le32 be_su_ndpa_queued;
+ __le32 be_su_ndp_queued;
+ __le32 be_mu_mimo_ndpa_queued;
+ __le32 be_mu_mimo_ndp_queued;
+ __le32 be_mu_mimo_brpoll_queued[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
+ __le32 be_ul_mumimo_trigger[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+} __packed;
+
+struct ath12k_htt_tx_selfgen_ac_err_stats_tlv {
+ __le32 ac_su_ndp_err;
+ __le32 ac_su_ndpa_err;
+ __le32 ac_mu_mimo_ndpa_err;
+ __le32 ac_mu_mimo_ndp_err;
+ __le32 ac_mu_mimo_brp1_err;
+ __le32 ac_mu_mimo_brp2_err;
+ __le32 ac_mu_mimo_brp3_err;
+} __packed;
+
+struct ath12k_htt_tx_selfgen_ax_err_stats_tlv {
+ __le32 ax_su_ndp_err;
+ __le32 ax_su_ndpa_err;
+ __le32 ax_mu_mimo_ndpa_err;
+ __le32 ax_mu_mimo_ndp_err;
+ __le32 ax_mu_mimo_brp_err[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1];
+ __le32 ax_basic_trigger_err;
+ __le32 ax_bsr_trigger_err;
+ __le32 ax_mu_bar_trigger_err;
+ __le32 ax_mu_rts_trigger_err;
+ __le32 ax_ulmumimo_trigger_err;
+} __packed;
+
+struct ath12k_htt_tx_selfgen_be_err_stats_tlv {
+ __le32 be_su_ndp_err;
+ __le32 be_su_ndpa_err;
+ __le32 be_mu_mimo_ndpa_err;
+ __le32 be_mu_mimo_ndp_err;
+ __le32 be_mu_mimo_brp_err[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
+ __le32 be_basic_trigger_err;
+ __le32 be_bsr_trigger_err;
+ __le32 be_mu_bar_trigger_err;
+ __le32 be_mu_rts_trigger_err;
+ __le32 be_ulmumimo_trigger_err;
+ __le32 be_mu_mimo_brp_err_num_cbf_rxd[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+ __le32 be_su_ndpa_flushed;
+ __le32 be_su_ndp_flushed;
+ __le32 be_mu_mimo_ndpa_flushed;
+ __le32 be_mu_mimo_ndp_flushed;
+ __le32 be_mu_mimo_brpoll_flushed[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
+ __le32 be_ul_mumimo_trigger_err[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+} __packed;
+
+enum ath12k_htt_tx_selfgen_sch_tsflag_error_stats {
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_FLUSH_RCVD_ERR,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_FILT_SCHED_CMD_ERR,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_MISMATCH_ERR,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_CBF_MIMO_CTRL_MISMATCH_ERR,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_CBF_BW_MISMATCH_ERR,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RETRY_COUNT_FAIL_ERR,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_TOO_LATE_RECEIVED_ERR,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_SIFS_STALL_NO_NEXT_CMD_ERR,
+
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS
+};
+
+struct ath12k_htt_tx_selfgen_ac_sched_status_stats_tlv {
+ __le32 ac_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ac_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ac_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 ac_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ac_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ac_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 ac_mu_mimo_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ac_mu_mimo_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+} __packed;
+
+struct ath12k_htt_tx_selfgen_ax_sched_status_stats_tlv {
+ __le32 ax_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ax_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ax_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 ax_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ax_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ax_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 ax_mu_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ax_mu_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 ax_mu_bar_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ax_mu_bar_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 ax_basic_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ax_basic_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 ax_ulmumimo_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 ax_ulmumimo_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+} __packed;
+
+struct ath12k_htt_tx_selfgen_be_sched_status_stats_tlv {
+ __le32 be_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 be_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 be_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 be_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 be_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 be_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 be_mu_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 be_mu_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 be_mu_bar_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 be_mu_bar_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 be_basic_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 be_basic_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+ __le32 be_ulmumimo_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+ __le32 be_ulmumimo_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+} __packed;
+
+struct ath12k_htt_stats_string_tlv {
+ DECLARE_FLEX_ARRAY(__le32, data);
+} __packed;
+
+#define ATH12K_HTT_SRING_STATS_MAC_ID GENMASK(7, 0)
+#define ATH12K_HTT_SRING_STATS_RING_ID GENMASK(15, 8)
+#define ATH12K_HTT_SRING_STATS_ARENA GENMASK(23, 16)
+#define ATH12K_HTT_SRING_STATS_EP BIT(24)
+#define ATH12K_HTT_SRING_STATS_NUM_AVAIL_WORDS GENMASK(15, 0)
+#define ATH12K_HTT_SRING_STATS_NUM_VALID_WORDS GENMASK(31, 16)
+#define ATH12K_HTT_SRING_STATS_HEAD_PTR GENMASK(15, 0)
+#define ATH12K_HTT_SRING_STATS_TAIL_PTR GENMASK(31, 16)
+#define ATH12K_HTT_SRING_STATS_CONSUMER_EMPTY GENMASK(15, 0)
+#define ATH12K_HTT_SRING_STATS_PRODUCER_FULL GENMASK(31, 16)
+#define ATH12K_HTT_SRING_STATS_PREFETCH_COUNT GENMASK(15, 0)
+#define ATH12K_HTT_SRING_STATS_INTERNAL_TAIL_PTR GENMASK(31, 16)
+
+struct ath12k_htt_sring_stats_tlv {
+ __le32 mac_id__ring_id__arena__ep;
+ __le32 base_addr_lsb;
+ __le32 base_addr_msb;
+ __le32 ring_size;
+ __le32 elem_size;
+ __le32 num_avail_words__num_valid_words;
+ __le32 head_ptr__tail_ptr;
+ __le32 consumer_empty__producer_full;
+ __le32 prefetch_count__internal_tail_ptr;
+} __packed;
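
[Editor's sketch] The sring stats TLV packs two or three sub-fields per word. The helper below is hypothetical and only illustrates how the GENMASK definitions above pair with le32_get_bits() from <linux/bitfield.h>:

#include <linux/bitfield.h>

/* Hypothetical decode helper; not part of the driver. */
static void example_sring_decode(const struct ath12k_htt_sring_stats_tlv *tlv)
{
        u32 mac_id = le32_get_bits(tlv->mac_id__ring_id__arena__ep,
                                   ATH12K_HTT_SRING_STATS_MAC_ID);
        u32 ring_id = le32_get_bits(tlv->mac_id__ring_id__arena__ep,
                                    ATH12K_HTT_SRING_STATS_RING_ID);
        u32 head = le32_get_bits(tlv->head_ptr__tail_ptr,
                                 ATH12K_HTT_SRING_STATS_HEAD_PTR);
        u32 tail = le32_get_bits(tlv->head_ptr__tail_ptr,
                                 ATH12K_HTT_SRING_STATS_TAIL_PTR);

        pr_debug("sring: mac %u ring %u head %u tail %u\n",
                 mac_id, ring_id, head, tail);
}
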
+
+struct ath12k_htt_sfm_cmn_tlv {
+ __le32 mac_id__word;
+ __le32 buf_total;
+ __le32 mem_empty;
+ __le32 deallocate_bufs;
+ __le32 num_records;
+} __packed;
+
+struct ath12k_htt_sfm_client_tlv {
+ __le32 client_id;
+ __le32 buf_min;
+ __le32 buf_max;
+ __le32 buf_busy;
+ __le32 buf_alloc;
+ __le32 buf_avail;
+ __le32 num_users;
+} __packed;
+
+struct ath12k_htt_sfm_client_user_tlv {
+ DECLARE_FLEX_ARRAY(__le32, dwords_used_by_user_n);
+} __packed;
+
+struct ath12k_htt_tx_pdev_mu_mimo_sch_stats_tlv {
+ __le32 mu_mimo_sch_posted;
+ __le32 mu_mimo_sch_failed;
+ __le32 mu_mimo_ppdu_posted;
+ __le32 ac_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS];
+ __le32 ax_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
+ __le32 ax_ofdma_sch_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
+ __le32 ax_ul_ofdma_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
+ __le32 ax_ul_ofdma_bsr_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
+ __le32 ax_ul_ofdma_bar_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
+ __le32 ax_ul_ofdma_brp_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
+ __le32 ax_ul_mumimo_nusers[ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS];
+ __le32 ax_ul_mumimo_brp_nusers[ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS];
+ __le32 ac_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS];
+ __le32 ax_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
+ __le32 be_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+ __le32 be_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+ __le32 ac_mu_mimo_grp_sz_ext[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS];
+} __packed;
+
+struct ath12k_htt_tx_pdev_mumimo_grp_stats_tlv {
+ __le32 dl_mumimo_grp_best_grp_size[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
+ __le32 dl_mumimo_grp_best_num_usrs[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
+ __le32 dl_mumimo_grp_eligible[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
+ __le32 dl_mumimo_grp_ineligible[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
+ __le32 dl_mumimo_grp_invalid[ATH12K_HTT_TX_NUM_MUMIMO_GRP_INVALID_WORDS];
+ __le32 dl_mumimo_grp_tputs[ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS];
+ __le32 ul_mumimo_grp_best_grp_size[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
+ __le32 ul_mumimo_grp_best_usrs[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
+ __le32 ul_mumimo_grp_tputs[ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS];
+} __packed;
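
[Editor's sketch] The dl_mumimo_grp_invalid histogram above is a flattened 2-D array: ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ (8) group sizes times ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE (10) reason codes, i.e. 80 words. A minimal indexing sketch, assuming a row-major [group size][reason] layout; the TLV does not spell out the ordering, so treat it as an assumption:

/* Hypothetical accessor; the row-major ordering is an assumption. */
static inline u32
example_grp_invalid(const struct ath12k_htt_tx_pdev_mumimo_grp_stats_tlv *tlv,
                    int grp_sz_idx, int reason)
{
        return le32_to_cpu(tlv->dl_mumimo_grp_invalid[grp_sz_idx *
                        ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE + reason]);
}
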
+
+enum ath12k_htt_stats_tx_sched_modes {
+ ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC = 0,
+ ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX,
+ ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX,
+ ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_BE,
+ ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_BE
+};
+
+struct ath12k_htt_tx_pdev_mpdu_stats_tlv {
+ __le32 mpdus_queued_usr;
+ __le32 mpdus_tried_usr;
+ __le32 mpdus_failed_usr;
+ __le32 mpdus_requeued_usr;
+ __le32 err_no_ba_usr;
+ __le32 mpdu_underrun_usr;
+ __le32 ampdu_underrun_usr;
+ __le32 user_index;
+ __le32 tx_sched_mode;
+} __packed;
+
+struct ath12k_htt_pdev_stats_cca_counters_tlv {
+ __le32 tx_frame_usec;
+ __le32 rx_frame_usec;
+ __le32 rx_clear_usec;
+ __le32 my_rx_frame_usec;
+ __le32 usec_cnt;
+ __le32 med_rx_idle_usec;
+ __le32 med_tx_idle_global_usec;
+ __le32 cca_obss_usec;
+} __packed;
+
+struct ath12k_htt_pdev_cca_stats_hist_v1_tlv {
+ __le32 chan_num;
+ __le32 num_records;
+ __le32 valid_cca_counters_bitmap;
+ __le32 collection_interval;
+} __packed;
+
+struct ath12k_htt_pdev_obss_pd_stats_tlv {
+ __le32 num_obss_tx_ppdu_success;
+ __le32 num_obss_tx_ppdu_failure;
+ __le32 num_sr_tx_transmissions;
+ __le32 num_spatial_reuse_opportunities;
+ __le32 num_non_srg_opportunities;
+ __le32 num_non_srg_ppdu_tried;
+ __le32 num_non_srg_ppdu_success;
+ __le32 num_srg_opportunities;
+ __le32 num_srg_ppdu_tried;
+ __le32 num_srg_ppdu_success;
+ __le32 num_psr_opportunities;
+ __le32 num_psr_ppdu_tried;
+ __le32 num_psr_ppdu_success;
+ __le32 num_non_srg_tried_per_ac[ATH12K_HTT_NUM_AC_WMM];
+ __le32 num_non_srg_success_ac[ATH12K_HTT_NUM_AC_WMM];
+ __le32 num_srg_tried_per_ac[ATH12K_HTT_NUM_AC_WMM];
+ __le32 num_srg_success_per_ac[ATH12K_HTT_NUM_AC_WMM];
+ __le32 num_obss_min_dur_check_flush_cnt;
+ __le32 num_sr_ppdu_abort_flush_cnt;
+} __packed;
+
+#define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS 14
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TXBF_NUM_BW_CNTRS 5
+#define ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES 2
+
+struct ath12k_htt_pdev_txrate_txbf_stats_tlv {
+ __le32 tx_su_txbf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_ibf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_ol_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_txbf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_su_ibf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_su_ol_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_su_txbf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 tx_su_ibf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 tx_su_ol_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ __le32 txbf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 ibf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 ol[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+ __le32 txbf_flag_set_mu_mode;
+ __le32 txbf_flag_set_final_status;
+ __le32 txbf_flag_not_set_verified_txbf_mode;
+ __le32 txbf_flag_not_set_disable_p2p_access;
+ __le32 txbf_flag_not_set_max_nss_in_he160;
+ __le32 txbf_flag_not_set_disable_uldlofdma;
+ __le32 txbf_flag_not_set_mcs_threshold_val;
+ __le32 txbf_flag_not_set_final_status;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t {
+ __le32 ax_ofdma_ndpa_queued;
+ __le32 ax_ofdma_ndpa_tried;
+ __le32 ax_ofdma_ndpa_flush;
+ __le32 ax_ofdma_ndpa_err;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv {
+ __le32 num_elems_ax_ndpa_arr;
+ __le32 arr_elem_size_ax_ndpa;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t, ax_ndpa);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t {
+ __le32 ax_ofdma_ndp_queued;
+ __le32 ax_ofdma_ndp_tried;
+ __le32 ax_ofdma_ndp_flush;
+ __le32 ax_ofdma_ndp_err;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv {
+ __le32 num_elems_ax_ndp_arr;
+ __le32 arr_elem_size_ax_ndp;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t, ax_ndp);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t {
+ __le32 ax_ofdma_brp_queued;
+ __le32 ax_ofdma_brp_tried;
+ __le32 ax_ofdma_brp_flushed;
+ __le32 ax_ofdma_brp_err;
+ __le32 ax_ofdma_num_cbf_rcvd;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv {
+ __le32 num_elems_ax_brp_arr;
+ __le32 arr_elem_size_ax_brp;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t, ax_brp);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t {
+ __le32 num_ppdu_steer;
+ __le32 num_ppdu_ol;
+ __le32 num_usr_prefetch;
+ __le32 num_usr_sound;
+ __le32 num_usr_force_sound;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv {
+ __le32 num_elems_ax_steer_arr;
+ __le32 arr_elem_size_ax_steer;
+ DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t, ax_steer);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv {
+ __le32 ax_ofdma_rbo_steer_mpdus_tried;
+ __le32 ax_ofdma_rbo_steer_mpdus_failed;
+ __le32 ax_ofdma_sifs_steer_mpdus_tried;
+ __le32 ax_ofdma_sifs_steer_mpdus_failed;
+} __packed;
+
+enum ath12k_htt_stats_page_lock_state {
+ ATH12K_HTT_STATS_PAGE_LOCKED = 0,
+ ATH12K_HTT_STATS_PAGE_UNLOCKED = 1,
+ ATH12K_NUM_PG_LOCK_STATE
+};
+
+#define ATH12K_PAGER_MAX 10
+
+#define ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0 GENMASK(7, 0)
+#define ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0 GENMASK(15, 8)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1 GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1 GENMASK(31, 16)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2 GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2 GENMASK(31, 16)
+
+struct ath12k_htt_pgs_info {
+ __le32 page_num;
+ __le32 num_pgs;
+ __le32 ts_lsb;
+ __le32 ts_msb;
+} __packed;
+
+struct ath12k_htt_dl_pager_stats_tlv {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ struct ath12k_htt_pgs_info pgs_info[ATH12K_NUM_PG_LOCK_STATE][ATH12K_PAGER_MAX];
+} __packed;
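
[Editor's sketch] A decode sketch for the packed pager counters, again using le32_get_bits(); the helper name is hypothetical:

static void example_dlpager_decode(const struct ath12k_htt_dl_pager_stats_tlv *tlv)
{
        u32 async_locked = le32_get_bits(tlv->info0,
                                         ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0);
        u32 sync_locked = le32_get_bits(tlv->info0,
                                        ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0);
        u32 total_locked = le32_get_bits(tlv->info1,
                                         ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1);
        u32 total_free = le32_get_bits(tlv->info1,
                                       ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1);

        pr_debug("dlpager: async %u sync %u locked %u free %u\n",
                 async_locked, sync_locked, total_locked, total_free);
}
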
+
+#define ATH12K_HTT_STATS_MAX_CHAINS 8
+#define ATH12K_HTT_MAX_RX_PKT_CNT 8
+#define ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define ATH12K_HTT_MAX_PER_BLK_ERR_CNT 20
+#define ATH12K_HTT_MAX_RX_OTA_ERR_CNT 14
+#define ATH12K_HTT_MAX_CH_PWR_INFO_SIZE 16
+
+struct ath12k_htt_phy_stats_tlv {
+ a_sle32 nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 false_radar_cnt;
+ __le32 radar_cs_cnt;
+ a_sle32 ani_level;
+ __le32 fw_run_time;
+ a_sle32 runtime_nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+} __packed;
+
+struct ath12k_htt_phy_counters_tlv {
+ __le32 rx_ofdma_timing_err_cnt;
+ __le32 rx_cck_fail_cnt;
+ __le32 mactx_abort_cnt;
+ __le32 macrx_abort_cnt;
+ __le32 phytx_abort_cnt;
+ __le32 phyrx_abort_cnt;
+ __le32 phyrx_defer_abort_cnt;
+ __le32 rx_gain_adj_lstf_event_cnt;
+ __le32 rx_gain_adj_non_legacy_cnt;
+ __le32 rx_pkt_cnt[ATH12K_HTT_MAX_RX_PKT_CNT];
+ __le32 rx_pkt_crc_pass_cnt[ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT];
+ __le32 per_blk_err_cnt[ATH12K_HTT_MAX_PER_BLK_ERR_CNT];
+ __le32 rx_ota_err_cnt[ATH12K_HTT_MAX_RX_OTA_ERR_CNT];
+} __packed;
+
+struct ath12k_htt_phy_reset_stats_tlv {
+ __le32 pdev_id;
+ __le32 chan_mhz;
+ __le32 chan_band_center_freq1;
+ __le32 chan_band_center_freq2;
+ __le32 chan_phy_mode;
+ __le32 chan_flags;
+ __le32 chan_num;
+ __le32 reset_cause;
+ __le32 prev_reset_cause;
+ __le32 phy_warm_reset_src;
+ __le32 rx_gain_tbl_mode;
+ __le32 xbar_val;
+ __le32 force_calibration;
+ __le32 phyrf_mode;
+ __le32 phy_homechan;
+ __le32 phy_tx_ch_mask;
+ __le32 phy_rx_ch_mask;
+ __le32 phybb_ini_mask;
+ __le32 phyrf_ini_mask;
+ __le32 phy_dfs_en_mask;
+ __le32 phy_sscan_en_mask;
+ __le32 phy_synth_sel_mask;
+ __le32 phy_adfs_freq;
+ __le32 cck_fir_settings;
+ __le32 phy_dyn_pri_chan;
+ __le32 cca_thresh;
+ __le32 dyn_cca_status;
+ __le32 rxdesense_thresh_hw;
+ __le32 rxdesense_thresh_sw;
+} __packed;
+
+struct ath12k_htt_phy_reset_counters_tlv {
+ __le32 pdev_id;
+ __le32 cf_active_low_fail_cnt;
+ __le32 cf_active_low_pass_cnt;
+ __le32 phy_off_through_vreg_cnt;
+ __le32 force_calibration_cnt;
+ __le32 rf_mode_switch_phy_off_cnt;
+ __le32 temperature_recal_cnt;
+} __packed;
+
+struct ath12k_htt_phy_tpc_stats_tlv {
+ __le32 pdev_id;
+ __le32 tx_power_scale;
+ __le32 tx_power_scale_db;
+ __le32 min_negative_tx_power;
+ __le32 reg_ctl_domain;
+ __le32 max_reg_allowed_power[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 max_reg_allowed_power_6ghz[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 twice_max_rd_power;
+ __le32 max_tx_power;
+ __le32 home_max_tx_power;
+ __le32 psd_power;
+ __le32 eirp_power;
+ __le32 power_type_6ghz;
+ __le32 sub_band_cfreq[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+ __le32 sub_band_txpower[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+} __packed;
+
+struct ath12k_htt_t2h_soc_txrx_stats_common_tlv {
+ __le32 inv_peers_msdu_drop_count_hi;
+ __le32 inv_peers_msdu_drop_count_lo;
+} __packed;
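
[Editor's sketch] The drop counter is a 64-bit value split across two 32-bit words; a reassembly sketch, with the _hi word in the upper 32 bits as the suffixes suggest:

/* e.g. inside a print helper: */
u64 drops = ((u64)le32_to_cpu(tlv->inv_peers_msdu_drop_count_hi) << 32) |
            le32_to_cpu(tlv->inv_peers_msdu_drop_count_lo);
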
+
+#define ATH12K_HTT_AST_PDEV_ID_INFO GENMASK(1, 0)
+#define ATH12K_HTT_AST_VDEV_ID_INFO GENMASK(9, 2)
+#define ATH12K_HTT_AST_NEXT_HOP_INFO BIT(10)
+#define ATH12K_HTT_AST_MCAST_INFO BIT(11)
+#define ATH12K_HTT_AST_MONITOR_DIRECT_INFO BIT(12)
+#define ATH12K_HTT_AST_MESH_STA_INFO BIT(13)
+#define ATH12K_HTT_AST_MEC_INFO BIT(14)
+#define ATH12K_HTT_AST_INTRA_BSS_INFO BIT(15)
+
+struct ath12k_htt_ast_entry_tlv {
+ __le32 sw_peer_id;
+ __le32 ast_index;
+ struct htt_mac_addr mac_addr;
+ __le32 info;
+} __packed;
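
[Editor's sketch] A decode sketch for one AST entry, pairing the info bitfields above with the htt_mac_addr helper that this series adds to dp.h (see the dp.h hunk further down); the function itself is hypothetical:

static void example_ast_decode(const struct ath12k_htt_ast_entry_tlv *tlv)
{
        u8 addr[ETH_ALEN];
        u32 info = le32_to_cpu(tlv->info);
        u8 pdev_id = u32_get_bits(info, ATH12K_HTT_AST_PDEV_ID_INFO);
        u8 vdev_id = u32_get_bits(info, ATH12K_HTT_AST_VDEV_ID_INFO);
        bool next_hop = u32_get_bits(info, ATH12K_HTT_AST_NEXT_HOP_INFO);

        ath12k_dp_get_mac_addr(le32_to_cpu(tlv->mac_addr.mac_addr_l32),
                               le32_to_cpu(tlv->mac_addr.mac_addr_h16), addr);

        pr_debug("ast: %pM pdev %u vdev %u next_hop %d\n",
                 addr, pdev_id, vdev_id, next_hop);
}
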
+
+enum ath12k_htt_stats_direction {
+ ATH12K_HTT_STATS_DIRECTION_TX,
+ ATH12K_HTT_STATS_DIRECTION_RX
+};
+
+enum ath12k_htt_stats_ppdu_type {
+ ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU,
+ ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO,
+ ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO,
+ ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA,
+ ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA
+};
+
+enum ath12k_htt_stats_param_type {
+ ATH12K_HTT_STATS_PREAM_OFDM,
+ ATH12K_HTT_STATS_PREAM_CCK,
+ ATH12K_HTT_STATS_PREAM_HT,
+ ATH12K_HTT_STATS_PREAM_VHT,
+ ATH12K_HTT_STATS_PREAM_HE,
+ ATH12K_HTT_STATS_PREAM_EHT,
+ ATH12K_HTT_STATS_PREAM_RSVD1,
+ ATH12K_HTT_STATS_PREAM_COUNT,
+};
+
+#define ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT 32
+
+struct ath12k_htt_pdev_puncture_stats_tlv {
+ __le32 mac_id__word;
+ __le32 direction;
+ __le32 preamble;
+ __le32 ppdu_type;
+ __le32 subband_cnt;
+ __le32 last_used_pattern_mask;
+ __le32 num_subbands_used_cnt[ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT];
+} __packed;
+
+struct ath12k_htt_dmac_reset_stats_tlv {
+ __le32 reset_count;
+ __le32 reset_time_lo_ms;
+ __le32 reset_time_hi_ms;
+ __le32 disengage_time_lo_ms;
+ __le32 disengage_time_hi_ms;
+ __le32 engage_time_lo_ms;
+ __le32 engage_time_hi_ms;
+ __le32 disengage_count;
+ __le32 engage_count;
+ __le32 drain_dest_ring_mask;
+} __packed;
+
+struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv {
+ __le32 mac_id__word;
+ __le32 rate_based_dlofdma_enabled_cnt[ATH12K_HTT_NUM_AC_WMM];
+ __le32 rate_based_dlofdma_disabled_cnt[ATH12K_HTT_NUM_AC_WMM];
+ __le32 rate_based_dlofdma_probing_cnt[ATH12K_HTT_NUM_AC_WMM];
+ __le32 rate_based_dlofdma_monitor_cnt[ATH12K_HTT_NUM_AC_WMM];
+ __le32 chan_acc_lat_based_dlofdma_enabled_cnt[ATH12K_HTT_NUM_AC_WMM];
+ __le32 chan_acc_lat_based_dlofdma_disabled_cnt[ATH12K_HTT_NUM_AC_WMM];
+ __le32 chan_acc_lat_based_dlofdma_monitor_cnt[ATH12K_HTT_NUM_AC_WMM];
+ __le32 downgrade_to_dl_su_ru_alloc_fail[ATH12K_HTT_NUM_AC_WMM];
+ __le32 candidate_list_single_user_disable_ofdma[ATH12K_HTT_NUM_AC_WMM];
+ __le32 dl_cand_list_dropped_high_ul_qos_weight[ATH12K_HTT_NUM_AC_WMM];
+ __le32 ax_dlofdma_disabled_due_to_pipelining[ATH12K_HTT_NUM_AC_WMM];
+ __le32 dlofdma_disabled_su_only_eligible[ATH12K_HTT_NUM_AC_WMM];
+ __le32 dlofdma_disabled_consec_no_mpdus_tried[ATH12K_HTT_NUM_AC_WMM];
+ __le32 dlofdma_disabled_consec_no_mpdus_success[ATH12K_HTT_NUM_AC_WMM];
+} __packed;
+
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS 4
+#define ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS 14
+
+enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52_26,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106_26,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_242,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484_242,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484_242,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2_484,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3_484,
+ ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x4,
+ ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS,
+};
+
+enum ATH12K_HTT_RC_MODE {
+ ATH12K_HTT_RC_MODE_SU_OL,
+ ATH12K_HTT_RC_MODE_SU_BF,
+ ATH12K_HTT_RC_MODE_MU1_INTF,
+ ATH12K_HTT_RC_MODE_MU2_INTF,
+ ATH12K_HTT_RC_MODE_MU3_INTF,
+ ATH12K_HTT_RC_MODE_MU4_INTF,
+ ATH12K_HTT_RC_MODE_MU5_INTF,
+ ATH12K_HTT_RC_MODE_MU6_INTF,
+ ATH12K_HTT_RC_MODE_MU7_INTF,
+ ATH12K_HTT_RC_MODE_2D_COUNT
+};
+
+enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
+ ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS
+};
+
+enum ath12k_htt_stats_rc_mode {
+ ATH12K_HTT_STATS_RC_MODE_DLSU = 0,
+ ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1,
+ ATH12K_HTT_STATS_RC_MODE_DLOFDMA = 2,
+ ATH12K_HTT_STATS_RC_MODE_ULMUMIMO = 3,
+ ATH12K_HTT_STATS_RC_MODE_ULOFDMA = 4,
+};
+
+enum ath12k_htt_stats_ru_type {
+ ATH12K_HTT_STATS_RU_TYPE_INVALID,
+ ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY,
+ ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU,
+};
+
+struct ath12k_htt_tx_rate_stats {
+ __le32 ppdus_tried;
+ __le32 ppdus_ack_failed;
+ __le32 mpdus_tried;
+ __le32 mpdus_failed;
+} __packed;
+
+struct ath12k_htt_tx_per_rate_stats_tlv {
+ __le32 rc_mode;
+ __le32 last_probed_mcs;
+ __le32 last_probed_nss;
+ __le32 last_probed_bw;
+ struct ath12k_htt_tx_rate_stats per_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS];
+ struct ath12k_htt_tx_rate_stats per_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
+ struct ath12k_htt_tx_rate_stats per_mcs[ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS];
+ struct ath12k_htt_tx_rate_stats per_bw320;
+ __le32 probe_cnt[ATH12K_HTT_RC_MODE_2D_COUNT];
+ __le32 ru_type;
+ struct ath12k_htt_tx_rate_stats ru[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
+} __packed;
+
+#define ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS 16
+#define ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS 5
+#define ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS 4
+#define ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS 4
+
+struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv {
+ __le32 mac_id__word;
+ __le32 be_ofdma_tx_ldpc;
+ __le32 be_ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
+ __le32 be_ofdma_tx_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
+ __le32 be_ofdma_tx_bw[ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS];
+ __le32 gi[ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS][ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
+ __le32 be_ofdma_tx_ru_size[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
+ __le32 be_ofdma_eht_sig_mcs[ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS];
+} __packed;
+
+struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv {
+ __le32 mac_id__word;
+ __le32 basic_trigger_across_bss;
+ __le32 basic_trigger_within_bss;
+ __le32 bsr_trigger_across_bss;
+ __le32 bsr_trigger_within_bss;
+ __le32 mu_rts_across_bss;
+ __le32 mu_rts_within_bss;
+ __le32 ul_mumimo_trigger_across_bss;
+ __le32 ul_mumimo_trigger_within_bss;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index c8e1b244b69e..9e5a4e75f2f6 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -14,6 +14,11 @@
#include "peer.h"
#include "dp_mon.h"
+enum ath12k_dp_desc_type {
+ ATH12K_DP_TX_DESC,
+ ATH12K_DP_RX_DESC,
+};
+
static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -36,6 +41,11 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
return;
}
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
ath12k_dp_rx_peer_tid_cleanup(ar, peer);
crypto_free_shash(peer->tfm_mmic);
peer->dp_setup_done = false;
@@ -127,7 +137,9 @@ static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
enum hal_ring_type type, int ring_num)
{
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
const u8 *grp_mask;
+ int i;

switch (type) {
case HAL_WBM2SW_RELEASE:
@@ -135,6 +147,14 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
+ map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ if (ring_num == map[i].wbm_ring_num) {
+ ring_num = i;
+ break;
+ }
+ }
+
grp_mask = &ab->hw_params->ring_mask->tx[0];
}
break;
@@ -312,20 +332,22 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
}
static
-u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
+u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
+ struct ath12k_link_vif *arvif)
{
u32 bank_config = 0;
+ struct ath12k_vif *ahvif = arvif->ahvif;

/* Only valid for raw frames with HW crypto enabled.
* With SW crypto, mac80211 sets key per packet
*/
- if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+ if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
bank_config |=
- u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
+ u32_encode_bits(ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher),
HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
- bank_config |= u32_encode_bits(arvif->tx_encap_type,
+ bank_config |= u32_encode_bits(ahvif->tx_encap_type,
HAL_TX_BANK_CONFIG_ENCAP_TYPE);
bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
@@ -340,7 +362,7 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif
HAL_TX_ADDRY_EN),
HAL_TX_BANK_CONFIG_ADDRY_EN);
- bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
+ bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(ahvif->vif) ? 3 : 0,
HAL_TX_BANK_CONFIG_MESH_EN) |
u32_encode_bits(arvif->vdev_id_check_en,
HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
@@ -350,7 +372,8 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif
return bank_config;
}
-static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
+static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab,
+ struct ath12k_link_vif *arvif,
struct ath12k_dp *dp)
{
int bank_id = DP_INVALID_BANK_ID;
@@ -452,8 +475,6 @@ static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
}
- ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
- ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}
@@ -474,20 +495,6 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
goto err;
}
- ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
- DP_TCL_CMD_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
- goto err;
- }
-
- ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
- 0, 0, DP_TCL_STATUS_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
- goto err;
- }
-
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
tx_comp_ring_num = map[i].wbm_ring_num;
@@ -611,6 +618,7 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
int i;
int ret = 0;
u32 end_offset, cookie;
+ enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
@@ -641,7 +649,8 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
paddr = link_desc_banks[i].paddr;
while (n_entries) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
+ ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
+ paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
if (rem_entries) {
@@ -785,6 +794,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
u32 paddr;
int i, ret;
u32 cookie;
+ enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm;
tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
tot_mem_sz += HAL_LINK_DESC_ALIGN;
@@ -845,8 +855,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
while (n_entries &&
(desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(desc,
- cookie, paddr);
+ ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
}
@@ -876,11 +885,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
enum dp_monitor_mode monitor_mode;
u8 ring_mask;
- while (i < ab->hw_params->max_tx_ring) {
- if (ab->hw_params->ring_mask->tx[grp_id] &
- BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
- ath12k_dp_tx_completion_handler(ab, i);
- i++;
+ if (ab->hw_params->ring_mask->tx[grp_id]) {
+ i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
+ ath12k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params->ring_mask->rx_err[grp_id]) {
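
[Editor's note] The rewritten tx-completion dispatch above assumes each interrupt group's tx mask has exactly one bit set, so the ring index can be read straight off fls(); a worked example of that mapping:

/* fls() is 1-based: fls(0x1) == 1, fls(0x4) == 3, fls(0) == 0.
 * A group mask of BIT(n) therefore yields ring index n:
 *   tx[grp_id] == 0x4  ->  fls(0x4) - 1 == 2  ->  tcl ring 2
 * If a mask ever carried multiple set bits, only the highest ring
 * would be serviced by this path.
 */
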
@@ -916,8 +923,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
@@ -937,8 +944,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
@@ -960,8 +967,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ LIST_HEAD(list);
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, 0);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
}
/* TODO: Implement handler for other interrupts */
@@ -974,27 +982,23 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
int i;
+ if (!ab->mon_reap_timer.function)
+ return;
+
del_timer_sync(&ab->mon_reap_timer);
for (i = 0; i < ab->num_radios; i++)
ath12k_dp_rx_pdev_free(ab, i);
}
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar)
{
- struct ath12k *ar;
- struct ath12k_pdev_dp *dp;
- int i;
+ struct ath12k_pdev_dp *dp = &ar->dp;
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- dp = &ar->dp;
- dp->mac_id = i;
- atomic_set(&dp->num_tx_pending, 0);
- init_waitqueue_head(&dp->tx_empty_waitq);
-
- /* TODO: Add any RXDMA setup required per pdev */
- }
+ dp->mac_id = ar->pdev_idx;
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+ /* TODO: Add any RXDMA setup required per pdev */
}
bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
@@ -1025,7 +1029,7 @@ static void ath12k_dp_service_mon_ring(struct timer_list *t)
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
ATH12K_DP_RX_MONITOR_MODE);
@@ -1099,9 +1103,9 @@ int ath12k_dp_htt_connect(struct ath12k_dp *dp)
return 0;
}
-static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
+static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
{
- switch (arvif->vdev_type) {
+ switch (arvif->ahvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
/* TODO: Verify the search type and flags since ast hash
* is not part of peer mapv3
@@ -1120,7 +1124,7 @@ static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
}
}
-void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
+void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif)
{
struct ath12k_base *ab = ar->ab;
@@ -1146,11 +1150,13 @@ void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
{
- struct ath12k_rx_desc_info *desc_info, *tmp;
+ struct ath12k_rx_desc_info *desc_info;
struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_skb_cb *skb_cb;
struct sk_buff *skb;
- int i;
+ struct ath12k *ar;
+ int i, j;
u32 pool_id, tx_spt_page;
if (!dp->spt_info)
@@ -1159,24 +1165,31 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
/* RX Descriptor cleanup */
spin_lock_bh(&dp->rx_desc_lock);
- list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) {
- list_del(&desc_info->list);
- skb = desc_info->skb;
+ for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+ desc_info = dp->rxbaddr[i];
- if (!skb)
- continue;
+ for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+ if (!desc_info[j].in_use) {
+ list_del(&desc_info[j].list);
+ continue;
+ }
- dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
- skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ skb = desc_info[j].skb;
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
}
for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
- if (!dp->spt_info->rxbaddr[i])
+ if (!dp->rxbaddr[i])
continue;
- kfree(dp->spt_info->rxbaddr[i]);
- dp->spt_info->rxbaddr[i] = NULL;
+ kfree(dp->rxbaddr[i]);
+ dp->rxbaddr[i] = NULL;
}
spin_unlock_bh(&dp->rx_desc_lock);
@@ -1193,6 +1206,17 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
if (!skb)
continue;
+ /* if we are unregistering, hw would've been destroyed and
+ * ar is no longer valid.
+ */
+ if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
+ skb_cb = ATH12K_SKB_CB(skb);
+ ar = skb_cb->ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+ }
+
dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
@@ -1206,11 +1230,11 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
- if (!dp->spt_info->txbaddr[tx_spt_page])
+ if (!dp->txbaddr[tx_spt_page])
continue;
- kfree(dp->spt_info->txbaddr[tx_spt_page]);
- dp->spt_info->txbaddr[tx_spt_page] = NULL;
+ kfree(dp->txbaddr[tx_spt_page]);
+ dp->txbaddr[tx_spt_page] = NULL;
}
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
@@ -1227,6 +1251,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
}
kfree(dp->spt_info);
+ dp->spt_info = NULL;
}
static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
@@ -1236,15 +1261,23 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
if (!ab->hw_params->reoq_lut_support)
return;
- if (!dp->reoq_lut.vaddr)
- return;
-
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
- dp->reoq_lut.vaddr = NULL;
+ if (dp->reoq_lut.vaddr) {
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+ }
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ if (dp->ml_reoq_lut.vaddr) {
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE1(ab), 0);
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
+ dp->ml_reoq_lut.vaddr = NULL;
+ }
}
void ath12k_dp_free(struct ath12k_base *ab)
@@ -1252,6 +1285,9 @@ void ath12k_dp_free(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
int i;
+ if (!dp->ab)
+ return;
+
ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
@@ -1262,11 +1298,14 @@ void ath12k_dp_free(struct ath12k_base *ab)
ath12k_dp_rx_reo_cmd_list_cleanup(ab);
- for (i = 0; i < ab->hw_params->max_tx_ring; i++)
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
kfree(dp->tx_ring[i].tx_status);
+ dp->tx_ring[i].tx_status = NULL;
+ }
ath12k_dp_rx_free(ab);
/* Deinit any SOC level resource */
+ dp->ab = NULL;
}
void ath12k_dp_cc_config(struct ath12k_base *ab)
@@ -1335,16 +1374,22 @@ static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie)
{
+ struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info **desc_addr_ptr;
- u16 ppt_idx, spt_idx;
+ u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
- spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+ spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
- if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES ||
+ start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
+ end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
+
+ if (ppt_idx < start_ppt_idx ||
+ ppt_idx >= end_ppt_idx ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
+ ppt_idx = ppt_idx - dp->rx_ppt_base;
desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
return *desc_addr_ptr;
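
[Editor's note] A worked example of the new per-device window check, using a hypothetical ATH12K_NUM_RX_SPT_PAGES of 32:

/* device 0: rx_ppt_base = 0   -> valid cookie PPT range [OFF, OFF + 32)
 * device 1: rx_ppt_base = 32  -> valid cookie PPT range [OFF + 32, OFF + 64)
 * (OFF == ATH12K_RX_SPT_PAGE_OFFSET; 32 is illustrative only.)
 * A cookie minted by device 0 now fails device 1's lower-bound check
 * instead of silently aliasing into the wrong device's descriptor pages.
 */
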
@@ -1354,13 +1399,17 @@ struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
u32 cookie)
{
struct ath12k_tx_desc_info **desc_addr_ptr;
- u16 ppt_idx, spt_idx;
+ u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
- spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+ spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
+
+ start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
+ end_ppt_idx = start_ppt_idx +
+ (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);
- if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES ||
- ppt_idx > ab->dp.num_spt_pages ||
+ if (ppt_idx < start_ppt_idx ||
+ ppt_idx >= end_ppt_idx ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
@@ -1375,7 +1424,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
u32 i, j, pool_id, tx_spt_page;
- u32 ppt_idx;
+ u32 ppt_idx, cookie_ppt_idx;
spin_lock_bh(&dp->rx_desc_lock);
@@ -1389,15 +1438,18 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
return -ENOMEM;
}
- dp->spt_info->rxbaddr[i] = &rx_descs[0];
+ ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
+ cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
+ dp->rxbaddr[i] = &rx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
+ rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
+ rx_descs[j].device_id = ab->device_id;
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
/* Update descriptor VA in SPT */
- rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j);
+ rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
*rx_desc_addr = &rx_descs[j];
}
}
@@ -1417,10 +1469,11 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
}
tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
- dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
+ ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
+
+ dp->txbaddr[tx_spt_page] = &tx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
tx_descs[j].pool_id = pool_id;
list_add_tail(&tx_descs[j].list,
@@ -1437,14 +1490,57 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
return 0;
}
+static int ath12k_dp_cmem_init(struct ath12k_base *ab,
+ struct ath12k_dp *dp,
+ enum ath12k_dp_desc_type type)
+{
+ u32 cmem_base;
+ int i, start, end;
+
+ cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+
+ switch (type) {
+ case ATH12K_DP_TX_DESC:
+ start = ATH12K_TX_SPT_PAGE_OFFSET;
+ end = start + ATH12K_NUM_TX_SPT_PAGES;
+ break;
+ case ATH12K_DP_RX_DESC:
+ cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
+ start = ATH12K_RX_SPT_PAGE_OFFSET;
+ end = start + ATH12K_NUM_RX_SPT_PAGES;
+ break;
+ default:
+ ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
+ return -EINVAL;
+ }
+
+ /* Write to PPT in CMEM */
+ for (i = start; i < end; i++)
+ ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+ dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+
+ return 0;
+}
+
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ if (ag->ab[i] == ab)
+ continue;
+
+ ath12k_dp_cmem_init(ab, &ag->ab[i]->dp, ATH12K_DP_RX_DESC);
+ }
+}
+
static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int i, ret = 0;
- u32 cmem_base;
INIT_LIST_HEAD(&dp->rx_desc_free_list);
- INIT_LIST_HEAD(&dp->rx_desc_used_list);
spin_lock_init(&dp->rx_desc_lock);
for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
@@ -1465,7 +1561,7 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
return -ENOMEM;
}
- cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+ dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
for (i = 0; i < dp->num_spt_pages; i++) {
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
@@ -1483,10 +1579,18 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
ret = -EINVAL;
goto free;
}
+ }
- /* Write to PPT in CMEM */
- ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
- dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+ ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC);
+ if (ret) {
+ ath12k_warn(ab, "HW CC Tx cmem init failed %d", ret);
+ goto free;
+ }
+
+ ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC);
+ if (ret) {
+ ath12k_warn(ab, "HW CC Rx cmem init failed %d", ret);
+ goto free;
}
ret = ath12k_dp_cc_desc_init(ab);
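
[Editor's note] Each PPT slot written by ath12k_dp_cmem_init() holds the 4 KiB-aligned physical page number of one SPT page; folding rx_ppt_base into cmem_base for RX is what lets ath12k_dp_partner_cc_init() later mirror a partner device's RX window. A sketch of the address math, where the 4-byte slot size is an assumption about ATH12K_PPT_ADDR_OFFSET():

/* Assuming ATH12K_PPT_ADDR_OFFSET(i) is roughly 4 * (i) and
 * ATH12K_SPT_4K_ALIGN_OFFSET is 12 (4 KiB alignment):
 *   slot address = cmem_base + 4 * i
 *   slot value   = spt_info[i].paddr >> 12
 */
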
@@ -1517,11 +1621,44 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
return -ENOMEM;
}
+ dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
+ DP_REOQ_LUT_SIZE,
+ &dp->ml_reoq_lut.paddr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!dp->ml_reoq_lut.vaddr) {
+ ath12k_warn(ab, "failed to allocate memory for ML reoq table");
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+ return -ENOMEM;
+ }
+
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
dp->reoq_lut.paddr);
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
+ dp->ml_reoq_lut.paddr >> 8);
+
return 0;
}
+static enum hal_rx_buf_return_buf_manager
+ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
+{
+ switch (ab->device_id) {
+ case 0:
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ case 1:
+ return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
+ case 2:
+ return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
+ default:
+ ath12k_warn(ab, "invalid %d device id, so choose default rbm\n",
+ ab->device_id);
+ WARN_ON(1);
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ }
+}
+
int ath12k_dp_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
@@ -1538,6 +1675,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;
+ dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index eb2dd408e081..7ac3143de016 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -16,6 +16,7 @@ struct ath12k_base;
struct ath12k_peer;
struct ath12k_dp;
struct ath12k_vif;
+struct ath12k_link_vif;
struct hal_tcl_status_ring;
struct ath12k_ext_irq_grp;
@@ -223,6 +224,9 @@ struct ath12k_pdev_dp {
#define ATH12K_NUM_TX_SPT_PAGES (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES)
#define ATH12K_NUM_SPT_PAGES (ATH12K_NUM_RX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES)
+#define ATH12K_TX_SPT_PAGE_OFFSET 0
+#define ATH12K_RX_SPT_PAGE_OFFSET ATH12K_NUM_TX_SPT_PAGES
+
/* The SPT pages are divided for TX and RX: per the offsets above, the
 * first block is used for TX and the remaining pages for RX
 */
@@ -245,7 +249,7 @@ struct ath12k_pdev_dp {
#define ATH12K_CC_SPT_MSB 8
#define ATH12K_CC_PPT_MSB 19
#define ATH12K_CC_PPT_SHIFT 9
-#define ATH12k_DP_CC_COOKIE_SPT GENMASK(8, 0)
+#define ATH12K_DP_CC_COOKIE_SPT GENMASK(8, 0)
#define ATH12K_DP_CC_COOKIE_PPT GENMASK(19, 9)
#define DP_REO_QREF_NUM GENMASK(31, 16)
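
[Editor's sketch] With the spelling fixed, the cookie carries 9 SPT bits (512 entries per page) and 11 PPT bits (2048 pages). A minimal encode sketch of what a cookie generator is expected to do; the helper name is hypothetical:

static inline u32 example_cc_cookie(u16 ppt_idx, u16 spt_idx)
{
        return u32_encode_bits(ppt_idx, ATH12K_DP_CC_COOKIE_PPT) |
               u32_encode_bits(spt_idx, ATH12K_DP_CC_COOKIE_SPT);
}
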
@@ -282,6 +286,9 @@ struct ath12k_rx_desc_info {
struct sk_buff *skb;
u32 cookie;
u32 magic;
+ u8 in_use : 1,
+ device_id : 3,
+ reserved : 4;
};
struct ath12k_tx_desc_info {
@@ -295,8 +302,6 @@ struct ath12k_tx_desc_info {
struct ath12k_spt_info {
dma_addr_t paddr;
u64 *vaddr;
- struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES];
- struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES];
};
struct ath12k_reo_queue_ref {
@@ -320,15 +325,15 @@ struct ath12k_dp {
u8 htt_tgt_ver_major;
u8 htt_tgt_ver_minor;
struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ enum hal_rx_buf_return_buf_manager idle_link_rbm;
struct dp_srng wbm_idle_ring;
struct dp_srng wbm_desc_rel_ring;
- struct dp_srng tcl_cmd_ring;
- struct dp_srng tcl_status_ring;
struct dp_srng reo_reinject_ring;
struct dp_srng rx_rel_ring;
struct dp_srng reo_except_ring;
struct dp_srng reo_cmd_ring;
struct dp_srng reo_status_ring;
+ enum ath12k_peer_metadata_version peer_metadata_ver;
struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
@@ -346,9 +351,11 @@ struct ath12k_dp {
struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
struct ath12k_spt_info *spt_info;
u32 num_spt_pages;
+ u32 rx_ppt_base;
+ struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES];
+ struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES];
struct list_head rx_desc_free_list;
- struct list_head rx_desc_used_list;
- /* protects the free and used desc list */
+ /* protects the free desc list */
spinlock_t rx_desc_lock;
struct list_head tx_desc_free_list[ATH12K_HW_MAX_QUEUES];
@@ -362,6 +369,7 @@ struct ath12k_dp {
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
struct dp_rxdma_mon_ring tx_mon_buf_ring;
struct ath12k_reo_q_addr_lut reoq_lut;
+ struct ath12k_reo_q_addr_lut ml_reoq_lut;
};
/* HTT definitions */
@@ -377,8 +385,6 @@ struct ath12k_dp {
/* peer meta data */
#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
-#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
-
/* HTT tx completion is overlaid in wbm_release_ring */
#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
#define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0)
@@ -690,9 +696,9 @@ enum htt_stats_internal_ppdu_frametype {
*
* The message would appear as follows:
*
- * |31 26|25|24|23 16|15 8|7 0|
- * |-----------------+----------------+----------------+---------------|
- * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |31 29|28|27|26|25|24|23 16|15 8|7 0|
+ * |-------+--+--+--+--+--+-----------+----------------+---------------|
+ * | rsvd1 |ED|DT|OV|PS|SS| ring_id | pdev_id | msg_type |
* |-------------------------------------------------------------------|
* | rsvd2 | ring_buffer_size |
* |-------------------------------------------------------------------|
@@ -719,7 +725,13 @@ enum htt_stats_internal_ppdu_frametype {
* More details can be got from enum htt_srng_ring_id
* b'24 - status_swap: 1 is to swap status TLV
* b'25 - pkt_swap: 1 is to swap packet TLV
- * b'26:31 - rsvd1: reserved for future use
+ * b'26 - rx_offset_valid (OV): flag to indicate rx offsets
+ * configuration fields are valid
+ * b'27 - drop_thresh_valid (DT): flag to indicate if the
+ * rx_drop_threshold field is valid
+ * b'28 - rx_mon_global_en: Enable/Disable global register
+ * configuration in Rx monitor module.
+ * b'29:31 - rsvd1: reserved for future use
* dword1 - b'0:16 - ring_buffer_size: size of buffers referenced by rx ring,
* in byte units.
* Valid only for HW_TO_SW_RING and SW_TO_HW_RING
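
[Editor's sketch] Packing dword0 per the layout documented above; the EX_* masks are derived from the bit positions in this comment, not from the driver's real macro names:

#define EX_INFO0_MSG_TYPE       GENMASK(7, 0)
#define EX_INFO0_PDEV_ID        GENMASK(15, 8)
#define EX_INFO0_RING_ID        GENMASK(23, 16)
#define EX_INFO0_SS             BIT(24) /* status_swap */
#define EX_INFO0_PS             BIT(25) /* pkt_swap */
#define EX_INFO0_OV             BIT(26) /* rx_offset_valid */
#define EX_INFO0_DT             BIT(27) /* drop_thresh_valid */
#define EX_INFO0_ED             BIT(28) /* rx_mon_global_en */

static u32 example_pack_info0(u8 msg_type, u8 pdev_id, u8 ring_id)
{
        return u32_encode_bits(msg_type, EX_INFO0_MSG_TYPE) |
               u32_encode_bits(pdev_id, EX_INFO0_PDEV_ID) |
               u32_encode_bits(ring_id, EX_INFO0_RING_ID) |
               u32_encode_bits(1, EX_INFO0_OV) |        /* offsets valid */
               u32_encode_bits(1, EX_INFO0_DT);         /* drop threshold valid */
}
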
@@ -1492,18 +1504,6 @@ struct htt_ppdu_stats_user_rate {
#define HTT_TX_INFO_PEERID(_flags) \
u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
-struct htt_tx_ppdu_stats_info {
- struct htt_tlv tlv_hdr;
- __le32 tx_success_bytes;
- __le32 tx_retry_bytes;
- __le32 tx_failed_bytes;
- __le32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
- __le16 tx_success_msdus;
- __le16 tx_retry_msdus;
- __le16 tx_failed_msdus;
- __le16 tx_duration; /* united in us */
-} __packed;
-
enum htt_ppdu_stats_usr_compln_status {
HTT_PPDU_STATS_USER_STATUS_OK,
HTT_PPDU_STATS_USER_STATUS_FILTERED,
@@ -1798,6 +1798,18 @@ enum vdev_stats_offload_timer_duration {
ATH12K_STATS_TIMER_DUR_2SEC = 3,
};
+#define ATH12K_HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_L32_1 GENMASK(15, 8)
+#define ATH12K_HTT_MAC_ADDR_L32_2 GENMASK(23, 16)
+#define ATH12K_HTT_MAC_ADDR_L32_3 GENMASK(31, 24)
+#define ATH12K_HTT_MAC_ADDR_H16_0 GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_H16_1 GENMASK(15, 8)
+
+struct htt_mac_addr {
+ __le32 mac_addr_l32;
+ __le32 mac_addr_h16;
+} __packed;
+
static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
{
memcpy(addr, &addr_l32, 4);
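
[Editor's note] A worked example of the split, per the byte masks above: for MAC 00:11:22:33:44:55, bytes 0-3 land in mac_addr_l32 and bytes 4-5 in mac_addr_h16, so on a little-endian host (which the raw memcpy() relies on):

/* mac_addr_l32 = 0x33221100 (byte 0 in bits 7:0 per ATH12K_HTT_MAC_ADDR_L32_0)
 * mac_addr_h16 = 0x00005544 (byte 4 in bits 7:0 per ATH12K_HTT_MAC_ADDR_H16_0)
 * Copying l32 then the low 16 bits of h16 back-to-back reproduces
 * 00:11:22:33:44:55.
 */
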
@@ -1808,12 +1820,13 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
struct ath12k_ext_irq_grp *irq_grp,
int budget);
int ath12k_dp_htt_connect(struct ath12k_dp *dp);
-void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif);
+void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif);
void ath12k_dp_free(struct ath12k_base *ab);
int ath12k_dp_alloc(struct ath12k_base *ab);
void ath12k_dp_cc_config(struct ath12k_base *ab);
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab);
int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab);
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar);
void ath12k_dp_pdev_free(struct ath12k_base *ab);
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type);
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 2d56913a75d0..5a21961cfd46 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -10,11 +10,10 @@
#include "dp_tx.h"
#include "peer.h"
-static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv,
- struct hal_rx_user_status *rx_user_status)
+static void
+ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user,
+ struct hal_rx_user_status *rx_user_status)
{
- struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv;
-
rx_user_status->ul_ofdma_user_v0_word0 =
__le32_to_cpu(ppdu_end_user->usr_resp_ref);
rx_user_status->ul_ofdma_user_v0_word1 =
@@ -26,19 +25,16 @@ ath12k_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *st
void *ppduinfo,
struct hal_rx_user_status *rx_user_status)
{
- u32 mpdu_ok_byte_count = __le32_to_cpu(stats->mpdu_ok_cnt);
- u32 mpdu_err_byte_count = __le32_to_cpu(stats->mpdu_err_cnt);
-
rx_user_status->mpdu_ok_byte_count =
- u32_get_bits(mpdu_ok_byte_count,
- HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT);
+ le32_get_bits(stats->info7,
+ HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT);
rx_user_status->mpdu_err_byte_count =
- u32_get_bits(mpdu_err_byte_count,
- HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT);
+ le32_get_bits(stats->info8,
+ HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT);
}
static void
-ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv,
+ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *rx_tlv,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct hal_rx_user_status *rx_user_status)
{
@@ -76,11 +72,9 @@ ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv,
ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
}
-static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data,
+static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_vht_sig_a_info *vht_sig =
- (struct hal_rx_vht_sig_a_info *)tlv_data;
u32 nsts, group_id, info0, info1;
u8 gi_setting;
@@ -122,11 +116,9 @@ static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data,
u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
}
-static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data,
+static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_ht_sig_info *ht_sig =
- (struct hal_rx_ht_sig_info *)tlv_data;
u32 info0 = __le32_to_cpu(ht_sig->info0);
u32 info1 = __le32_to_cpu(ht_sig->info1);
@@ -139,11 +131,9 @@ static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
-static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
+static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_lsig_b_info *lsigb =
- (struct hal_rx_lsig_b_info *)tlv_data;
u32 info0 = __le32_to_cpu(lsigb->info0);
u8 rate;
@@ -173,11 +163,9 @@ static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
-static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
+static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_lsig_a_info *lsiga =
- (struct hal_rx_lsig_a_info *)tlv_data;
u32 info0 = __le32_to_cpu(lsiga->info0);
u8 rate;
@@ -215,14 +203,13 @@ static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
-static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *ofdma,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
- (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
u32 info0, value;
- info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+ info0 = __le32_to_cpu(ofdma->info0);
ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN;
@@ -253,11 +240,10 @@ static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
}
-static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_b2_mu(const struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
- (struct hal_rx_he_sig_b2_mu_info *)tlv_data;
u32 info0, value;
info0 = __le32_to_cpu(he_sig_b2_mu->info0);
@@ -280,11 +266,10 @@ static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS);
}
-static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
- (struct hal_rx_he_sig_b1_mu_info *)tlv_data;
u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0);
u16 ru_tones;
@@ -295,11 +280,10 @@ static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
-static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
- struct hal_rx_mon_ppdu_info *ppdu_info)
+static void
+ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
- (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
u32 info0, info1, value;
u16 he_gi = 0, he_ltf = 0;
@@ -430,11 +414,9 @@ static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
-static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
+static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- struct hal_rx_he_sig_a_su_info *he_sig_a =
- (struct hal_rx_he_sig_a_su_info *)tlv_data;
u32 info0, info1, value;
u32 dcm;
u8 he_dcm = 0, he_stbc = 0;
@@ -583,22 +565,30 @@ static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
static enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
struct ath12k_mon_data *pmon,
- u32 tlv_tag, u8 *tlv_data, u32 userid)
+ u32 tlv_tag, const void *tlv_data,
+ u32 userid)
{
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
u32 info[7];
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
- struct hal_rx_ppdu_start *ppdu_start =
- (struct hal_rx_ppdu_start *)tlv_data;
+ const struct hal_rx_ppdu_start *ppdu_start = tlv_data;
+
+ u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32,
+ ppdu_start->ppdu_start_ts_31_0);
info[0] = __le32_to_cpu(ppdu_start->info0);
- ppdu_info->ppdu_id =
- u32_get_bits(info[0], HAL_RX_PPDU_START_INFO0_PPDU_ID);
- ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
- ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
+ ppdu_info->ppdu_id = u32_get_bits(info[0],
+ HAL_RX_PPDU_START_INFO0_PPDU_ID);
+
+ info[1] = __le32_to_cpu(ppdu_start->info1);
+ ppdu_info->chan_num = u32_get_bits(info[1],
+ HAL_RX_PPDU_START_INFO1_CHAN_NUM);
+ ppdu_info->freq = u32_get_bits(info[1],
+ HAL_RX_PPDU_START_INFO1_CHAN_FREQ);
+ ppdu_info->ppdu_ts = ppdu_ts;
if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) {
ppdu_info->last_ppdu_id = ppdu_info->ppdu_id;
@@ -610,8 +600,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_PPDU_END_USER_STATS: {
- struct hal_rx_ppdu_end_user_stats *eu_stats =
- (struct hal_rx_ppdu_end_user_stats *)tlv_data;
+ const struct hal_rx_ppdu_end_user_stats *eu_stats = tlv_data;
+ u32 tid_bitmap;
info[0] = __le32_to_cpu(eu_stats->info0);
info[1] = __le32_to_cpu(eu_stats->info1);
@@ -624,10 +614,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX);
ppdu_info->fc_valid =
u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID);
- ppdu_info->tid =
- ffs(u32_get_bits(info[6],
- HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP)
- - 1);
+ tid_bitmap = u32_get_bits(info[6],
+ HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP);
+ ppdu_info->tid = ffs(tid_bitmap) - 1;
ppdu_info->tcp_msdu_count =
u32_get_bits(info[4],
HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT);
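The TID rework above also corrects the placement of the "- 1": the old code computed ffs(bitmap - 1) rather than ffs(bitmap) - 1, so the reported TID was wrong whenever any bit other than bit 0 was set. Since ffs() returns the 1-based position of the least-significant set bit, subtracting one afterwards yields the lowest active TID. A quick illustration in plain C:

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
	unsigned int tid_bitmap = 1u << 5; /* only TID 5 is set */

	/* old parenthesization: the "- 1" sat inside ffs() */
	printf("ffs(bitmap - 1) = %d (wrong TID)\n", ffs(tid_bitmap - 1));

	/* new code: lowest set bit, converted to a 0-based TID */
	printf("ffs(bitmap) - 1 = %d (TID 5)\n", ffs(tid_bitmap) - 1);
	return 0;
}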
@@ -668,8 +657,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
&ppdu_info->userstats[userid];
ppdu_info->num_users += 1;
- ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats);
- ath12k_dp_mon_rx_populate_mu_user_info(tlv_data, ppdu_info,
+ ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats);
+ ath12k_dp_mon_rx_populate_mu_user_info(eu_stats, ppdu_info,
rxuser_stats);
}
ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]);
@@ -677,8 +666,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_PPDU_END_USER_STATS_EXT: {
- struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
- (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+ const struct hal_rx_ppdu_end_user_stats_ext *eu_stats = tlv_data;
+
ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1);
ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2);
ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3);
@@ -724,40 +713,25 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
case HAL_PHYRX_RSSI_LEGACY: {
- struct hal_rx_phyrx_rssi_legacy_info *rssi =
- (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
- u32 reception_type = 0;
- u32 rssi_legacy_info = __le32_to_cpu(rssi->rsvd[0]);
+ const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data;
info[0] = __le32_to_cpu(rssi->info0);
+ info[1] = __le32_to_cpu(rssi->info1);
/* TODO: Please note that the combined rssi will not be accurate
* in MU case. Rssi in MU needs to be retrieved from
* PHYRX_OTHER_RECEIVE_INFO TLV.
*/
ppdu_info->rssi_comb =
- u32_get_bits(info[0],
- HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB);
- reception_type =
- u32_get_bits(rssi_legacy_info,
- HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION);
-
- switch (reception_type) {
- case HAL_RECEPTION_TYPE_ULOFMDA:
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
- break;
- case HAL_RECEPTION_TYPE_ULMIMO:
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
- break;
- default:
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
- break;
- }
+ u32_get_bits(info[1],
+ HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB);
+
+ ppdu_info->bw = u32_get_bits(info[0],
+ HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW);
break;
}
case HAL_RXPCU_PPDU_END_INFO: {
- struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
- (struct hal_rx_ppdu_end_duration *)tlv_data;
+ const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data;
info[0] = __le32_to_cpu(ppdu_rx_duration->info0);
ppdu_info->rx_duration =
@@ -768,8 +742,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_MPDU_START: {
- struct hal_rx_mpdu_start *mpdu_start =
- (struct hal_rx_mpdu_start *)tlv_data;
+ const struct hal_rx_mpdu_start *mpdu_start = tlv_data;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
u16 peer_id;
@@ -798,8 +771,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
case HAL_MON_BUF_ADDR: {
struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
- struct dp_mon_packet_info *packet_info =
- (struct dp_mon_packet_info *)tlv_data;
+ const struct dp_mon_packet_info *packet_info = tlv_data;
int buf_id = u32_get_bits(packet_info->cookie,
DP_RXDMA_BUF_COOKIE_BUF_ID);
struct sk_buff *msdu;
@@ -831,8 +803,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
break;
}
case HAL_RX_MSDU_END: {
- struct rx_msdu_end_qcn9274 *msdu_end =
- (struct rx_msdu_end_qcn9274 *)tlv_data;
+ const struct rx_msdu_end_qcn9274 *msdu_end = tlv_data;
bool is_first_msdu_in_mpdu;
u16 msdu_end_info;
@@ -860,27 +831,29 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
-static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff *msdu)
+static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
+ struct sk_buff *head_msdu,
+ struct sk_buff *tail_msdu)
{
u32 rx_pkt_offset, l2_hdr_offset;
rx_pkt_offset = ar->ab->hal.hal_desc_sz;
- l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
- (struct hal_rx_desc *)msdu->data);
- skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
+ l2_hdr_offset =
+ ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data);
+ skb_pull(head_msdu, rx_pkt_offset + l2_hdr_offset);
}
static struct sk_buff *
-ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
- u32 mac_id, struct sk_buff *head_msdu,
+ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id,
+ struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
struct ieee80211_rx_status *rxs, bool *fcs_err)
{
struct ath12k_base *ab = ar->ab;
- struct sk_buff *msdu, *mpdu_buf, *prev_buf;
- struct hal_rx_desc *rx_desc;
+ struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list;
+ struct hal_rx_desc *rx_desc, *tail_rx_desc;
u8 *hdr_desc, *dest, decap_format;
struct ieee80211_hdr_3addr *wh;
- u32 err_bitmap;
+ u32 err_bitmap, frag_list_sum_len = 0;
mpdu_buf = NULL;
@@ -888,24 +861,30 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
goto err_merge_fail;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
- err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
+ tail_rx_desc = (struct hal_rx_desc *)tail_msdu->data;
+ err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, tail_rx_desc);
if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
*fcs_err = true;
- decap_format = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+ decap_format = ath12k_dp_rx_h_decap_type(ab, tail_rx_desc);
- ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ ath12k_dp_rx_h_ppdu(ar, tail_rx_desc, rxs);
if (decap_format == DP_RX_DECAP_TYPE_RAW) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
prev_buf = head_msdu;
msdu = head_msdu->next;
+ head_frag_list = NULL;
while (msdu) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
+
+ if (!head_frag_list)
+ head_frag_list = msdu;
+ frag_list_sum_len += msdu->len;
prev_buf = msdu;
msdu = msdu->next;
}
@@ -913,6 +892,12 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
prev_buf->next = NULL;
skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ if (head_frag_list) {
+ skb_shinfo(head_msdu)->frag_list = head_frag_list;
+ head_msdu->data_len = frag_list_sum_len;
+ head_msdu->len += head_msdu->data_len;
+ head_msdu->next = NULL;
+ }
} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
u8 qos_pkt = 0;
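In the RAW-decap path above, every MSDU after the head is now attached to the head skb as a frag_list, and the head's data_len/len are bumped by the summed fragment lengths so the merged MPDU reports its true size. The bookkeeping pattern, sketched with a toy buffer chain instead of real sk_buffs (struct and field names illustrative):

#include <stdio.h>

/* toy stand-in for an sk_buff chain; only the length bookkeeping matters */
struct buf {
	unsigned int len;      /* bytes directly in this buffer */
	unsigned int data_len; /* bytes held in the fragment list */
	struct buf *next;
};

int main(void)
{
	struct buf tail2 = { 300, 0, NULL };
	struct buf tail1 = { 200, 0, &tail2 };
	struct buf head  = { 100, 0, &tail1 };
	struct buf *frag_list = NULL, *msdu;
	unsigned int frag_list_sum_len = 0;

	for (msdu = head.next; msdu; msdu = msdu->next) {
		if (!frag_list)
			frag_list = msdu; /* first fragment anchors the list */
		frag_list_sum_len += msdu->len;
	}

	if (frag_list) {
		head.data_len = frag_list_sum_len;
		head.len += head.data_len; /* 100 + 200 + 300 = 600 */
		head.next = NULL;          /* chain now lives in frag_list */
	}

	printf("merged len = %u (data_len %u)\n", head.len, head.data_len);
	return 0;
}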
@@ -929,7 +914,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
msdu = head_msdu;
while (msdu) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
@@ -944,7 +929,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
goto err_merge_fail;
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "mpdu_buf %pK mpdu_buf->len %u",
+ "mpdu_buf %p mpdu_buf->len %u",
prev_buf, prev_buf->len);
} else {
ath12k_dbg(ab, ATH12K_DBG_DATA,
@@ -958,7 +943,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
err_merge_fail:
if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "err_merge_fail mpdu_buf %pK", mpdu_buf);
+ "err_merge_fail mpdu_buf %p", mpdu_buf);
/* Free the head buffer */
dev_kfree_skb_any(mpdu_buf);
}
@@ -1087,12 +1072,18 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
- if (peer && peer->sta)
+ if (peer && peer->sta) {
pubsta = peer->sta;
+ if (pubsta->valid_links) {
+ status->link_valid = 1;
+ status->link_id = peer->link_id;
+ }
+ }
+
spin_unlock_bh(&ar->ab->base_lock);
ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
- "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -1135,7 +1126,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
}
static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
- struct sk_buff *head_msdu,
+ struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct napi_struct *napi)
{
@@ -1144,7 +1135,8 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
struct ieee80211_rx_status *rxs = &dp->rx_status;
bool fcs_err = false;
- mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, head_msdu,
+ mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id,
+ head_msdu, tail_msdu,
rxs, &fcs_err);
if (!mon_skb)
goto mon_deliver_fail;
@@ -1192,19 +1184,19 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
struct sk_buff *skb)
{
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- struct hal_tlv_hdr *tlv;
+ struct hal_tlv_64_hdr *tlv;
enum hal_rx_mon_status hal_status;
- u32 tlv_userid = 0;
+ u32 tlv_userid;
u16 tlv_tag, tlv_len;
u8 *ptr = skb->data;
memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
do {
- tlv = (struct hal_tlv_hdr *)ptr;
- tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
- tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
- tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
+ tlv = (struct hal_tlv_64_hdr *)ptr;
+ tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
+ tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
+ tlv_userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
@@ -1219,7 +1211,7 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
tlv_tag, ptr, tlv_userid);
ptr += tlv_len;
- ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+ ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
break;
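The status-TLV walker above now consumes the 64-bit TLV header: tag, length and user id are all carried in one little-endian 64-bit word, and the cursor is re-aligned to the 64-bit TLV boundary after each payload. A self-contained sketch of the same walk; the bit positions are illustrative, the real HAL_TLV_64_* masks live in the driver's HAL headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* illustrative field positions, standing in for HAL_TLV_64_HDR_TAG/LEN/USR_ID */
#define TLV64_TAG(w)	(unsigned int)(((w) >> 1)  & 0x1ff)
#define TLV64_LEN(w)	(unsigned int)(((w) >> 10) & 0xfff)
#define TLV64_UID(w)	(unsigned int)(((w) >> 26) & 0x3f)
#define TLV64_ALIGN	8

static const uint8_t *align_up(const uint8_t *p, size_t a)
{
	return (const uint8_t *)(((uintptr_t)p + a - 1) & ~(uintptr_t)(a - 1));
}

static void walk(const uint8_t *buf, size_t size)
{
	const uint8_t *ptr = buf, *end = buf + size;

	while (ptr + sizeof(uint64_t) <= end) {
		uint64_t tl;

		memcpy(&tl, ptr, sizeof(tl)); /* __le64 on a LE host */
		if (!TLV64_TAG(tl))
			break;
		printf("tag %u len %u userid %u\n",
		       TLV64_TAG(tl), TLV64_LEN(tl), TLV64_UID(tl));

		ptr += sizeof(tl) + TLV64_LEN(tl);
		ptr = align_up(ptr, TLV64_ALIGN); /* PTR_ALIGN(ptr, HAL_TLV_64_ALIGN) */
	}
}

int main(void)
{
	uint8_t buf[32] = { 0 };
	uint64_t tl = (uint64_t)(42 << 1) | (4 << 10) | (3ull << 26);

	memcpy(buf, &tl, sizeof(tl));
	walk(buf, sizeof(buf));
	return 0;
}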
@@ -1252,7 +1244,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
if (head_msdu && tail_msdu) {
ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
- ppdu_info, napi);
+ tail_msdu, ppdu_info, napi);
}
kfree(mon_mpdu);
@@ -1596,7 +1588,7 @@ ath12k_dp_mon_tx_gen_prot_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
static enum dp_mon_tx_tlv_status
ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
struct ath12k_mon_data *pmon,
- u16 tlv_tag, u8 *tlv_data, u32 userid)
+ u16 tlv_tag, const void *tlv_data, u32 userid)
{
struct dp_mon_tx_ppdu_info *tx_ppdu_info;
enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
@@ -1606,8 +1598,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
switch (tlv_tag) {
case HAL_TX_FES_SETUP: {
- struct hal_tx_fes_setup *tx_fes_setup =
- (struct hal_tx_fes_setup *)tlv_data;
+ const struct hal_tx_fes_setup *tx_fes_setup = tlv_data;
info[0] = __le32_to_cpu(tx_fes_setup->info0);
tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id);
@@ -1618,8 +1609,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_END: {
- struct hal_tx_fes_status_end *tx_fes_status_end =
- (struct hal_tx_fes_status_end *)tlv_data;
+ const struct hal_tx_fes_status_end *tx_fes_status_end = tlv_data;
u32 tst_15_0, tst_31_16;
info[0] = __le32_to_cpu(tx_fes_status_end->info0);
@@ -1636,8 +1626,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_RX_RESPONSE_REQUIRED_INFO: {
- struct hal_rx_resp_req_info *rx_resp_req_info =
- (struct hal_rx_resp_req_info *)tlv_data;
+ const struct hal_rx_resp_req_info *rx_resp_req_info = tlv_data;
u32 addr_32;
u16 addr_16;
@@ -1682,8 +1671,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_PCU_PPDU_SETUP_INIT: {
- struct hal_tx_pcu_ppdu_setup_init *ppdu_setup =
- (struct hal_tx_pcu_ppdu_setup_init *)tlv_data;
+ const struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = tlv_data;
u32 addr_32;
u16 addr_16;
@@ -1729,8 +1717,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_QUEUE_EXTENSION: {
- struct hal_tx_queue_exten *tx_q_exten =
- (struct hal_tx_queue_exten *)tlv_data;
+ const struct hal_tx_queue_exten *tx_q_exten = tlv_data;
info[0] = __le32_to_cpu(tx_q_exten->info0);
@@ -1742,8 +1729,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_START: {
- struct hal_tx_fes_status_start *tx_fes_start =
- (struct hal_tx_fes_status_start *)tlv_data;
+ const struct hal_tx_fes_status_start *tx_fes_start = tlv_data;
info[0] = __le32_to_cpu(tx_fes_start->info0);
@@ -1754,8 +1740,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_PROT: {
- struct hal_tx_fes_status_prot *tx_fes_status =
- (struct hal_tx_fes_status_prot *)tlv_data;
+ const struct hal_tx_fes_status_prot *tx_fes_status = tlv_data;
u32 start_timestamp;
u32 end_timestamp;
@@ -1782,8 +1767,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
case HAL_TX_FES_STATUS_START_PPDU:
case HAL_TX_FES_STATUS_START_PROT: {
- struct hal_tx_fes_status_start_prot *tx_fes_stat_start =
- (struct hal_tx_fes_status_start_prot *)tlv_data;
+ const struct hal_tx_fes_status_start_prot *tx_fes_stat_start = tlv_data;
u64 ppdu_ts;
info[0] = __le32_to_cpu(tx_fes_stat_start->info0);
@@ -1798,8 +1782,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_TX_FES_STATUS_USER_PPDU: {
- struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu =
- (struct hal_tx_fes_status_user_ppdu *)tlv_data;
+ const struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = tlv_data;
info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0);
@@ -1842,8 +1825,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
break;
case HAL_RX_FRAME_BITMAP_ACK: {
- struct hal_rx_frame_bitmap_ack *fbm_ack =
- (struct hal_rx_frame_bitmap_ack *)tlv_data;
+ const struct hal_rx_frame_bitmap_ack *fbm_ack = tlv_data;
u32 addr_32;
u16 addr_16;
@@ -1861,8 +1843,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_MACTX_PHY_DESC: {
- struct hal_tx_phy_desc *tx_phy_desc =
- (struct hal_tx_phy_desc *)tlv_data;
+ const struct hal_tx_phy_desc *tx_phy_desc = tlv_data;
info[0] = __le32_to_cpu(tx_phy_desc->info0);
info[1] = __le32_to_cpu(tx_phy_desc->info1);
@@ -1903,43 +1884,6 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
break;
}
- case HAL_MON_BUF_ADDR: {
- struct dp_rxdma_mon_ring *buf_ring = &ab->dp.tx_mon_buf_ring;
- struct dp_mon_packet_info *packet_info =
- (struct dp_mon_packet_info *)tlv_data;
- int buf_id = u32_get_bits(packet_info->cookie,
- DP_RXDMA_BUF_COOKIE_BUF_ID);
- struct sk_buff *msdu;
- struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu;
- struct ath12k_skb_rxcb *rxcb;
-
- spin_lock_bh(&buf_ring->idr_lock);
- msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
- spin_unlock_bh(&buf_ring->idr_lock);
-
- if (unlikely(!msdu)) {
- ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
- buf_id);
- return DP_MON_TX_STATUS_PPDU_NOT_DONE;
- }
-
- rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
-
- if (!mon_mpdu->head)
- mon_mpdu->head = msdu;
- else if (mon_mpdu->tail)
- mon_mpdu->tail->next = msdu;
-
- mon_mpdu->tail = msdu;
-
- ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
- status = DP_MON_TX_BUFFER_ADDR;
- break;
- }
-
case HAL_TX_MPDU_END:
list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
&tx_ppdu_info->dp_tx_mon_mpdu_list);
@@ -1985,15 +1929,16 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct dp_mon_mpdu *tmp, *mon_mpdu;
- struct sk_buff *head_msdu;
+ struct sk_buff *head_msdu, *tail_msdu;
list_for_each_entry_safe(mon_mpdu, tmp,
&tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
head_msdu = mon_mpdu->head;
+ tail_msdu = mon_mpdu->tail;
if (head_msdu)
- ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
+ ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, tail_msdu,
&tx_ppdu_info->rx_status, napi);
kfree(mon_mpdu);
@@ -2088,8 +2033,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
buf_ring = &dp->rxdma_mon_buf_ring;
} else {
- mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id];
- buf_ring = &dp->tx_mon_buf_ring;
+ return 0;
}
srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
@@ -2203,7 +2147,7 @@ ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_st
}
static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
- struct ath12k_sta *arsta,
+ struct ath12k_link_sta *arsta,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
@@ -2359,7 +2303,8 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
struct hal_rx_mon_ppdu_info *ppdu_info,
u32 uid)
{
- struct ath12k_sta *arsta = NULL;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
struct ath12k_rx_peer_stats *rx_stats = NULL;
struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid];
struct ath12k_peer *peer;
@@ -2376,7 +2321,8 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
return;
}
- arsta = ath12k_sta_to_arsta(peer->sta);
+ ahsta = ath12k_sta_to_ahsta(peer->sta);
+ arsta = &ahsta->deflink;
rx_stats = arsta->rx_stats;
if (!rx_stats)
@@ -2483,7 +2429,8 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
struct dp_srng *mon_dst_ring;
struct hal_srng *srng;
struct dp_rxdma_mon_ring *buf_ring;
- struct ath12k_sta *arsta = NULL;
+ struct ath12k_sta *ahsta = NULL;
+ struct ath12k_link_sta *arsta;
struct ath12k_peer *peer;
u64 cookie;
int num_buffs_reaped = 0, srng_id, buf_id;
@@ -2552,7 +2499,8 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
}
if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
- arsta = ath12k_sta_to_arsta(peer->sta);
+ ahsta = ath12k_sta_to_ahsta(peer->sta);
+ arsta = &ahsta->deflink;
ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
ppdu_info);
} else if ((ppdu_info->fc_valid) &&
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index ca76c018dd0c..dad35bfd83f6 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -17,6 +17,7 @@
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
+#include "debugfs_htt_stats.h"
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
@@ -239,31 +240,61 @@ static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}
-static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
+static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
{
- int i, reaped = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(skb_list)))
+ dev_kfree_skb_any(skb);
+}
+
+static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
+ struct list_head *head,
+ size_t count)
+{
+ struct list_head *cur;
+ struct ath12k_rx_desc_info *rx_desc;
+ size_t nodes = 0;
- do {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
- reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
- DP_MON_SERVICE_BUDGET,
- ATH12K_DP_RX_MONITOR_MODE);
+ if (!count) {
+ INIT_LIST_HEAD(list);
+ goto out;
+ }
- /* nothing more to reap */
- if (reaped < DP_MON_SERVICE_BUDGET)
- return 0;
+ list_for_each(cur, head) {
+ if (!count)
+ break;
- } while (time_before(jiffies, timeout));
+ rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
+ rx_desc->in_use = true;
- ath12k_warn(ab, "dp mon ring purge timeout");
+ count--;
+ nodes++;
+ }
- return -ETIMEDOUT;
+ list_cut_before(list, head, cur);
+out:
+ return nodes;
+}
+
+static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
+ struct list_head *used_list)
+{
+ struct ath12k_rx_desc_info *rx_desc, *safe;
+
+ /* Reset the use flag */
+ list_for_each_entry_safe(rx_desc, safe, used_list, list)
+ rx_desc->in_use = false;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_splice_tail(used_list, &dp->rx_desc_free_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
}
/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring,
+ struct list_head *used_list,
int req_entries)
{
struct ath12k_buffer_addr *desc;
@@ -292,6 +323,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
req_entries = min(num_free, req_entries);
num_remain = req_entries;
+ if (!num_remain)
+ goto out;
+
+ /* Get the descriptor from free list */
+ if (list_empty(used_list)) {
+ spin_lock_bh(&dp->rx_desc_lock);
+ req_entries = ath12k_dp_list_cut_nodes(used_list,
+ &dp->rx_desc_free_list,
+ num_remain);
+ spin_unlock_bh(&dp->rx_desc_lock);
+ num_remain = req_entries;
+ }
+
while (num_remain > 0) {
skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
DP_RX_BUFFER_ALIGN_SIZE);
@@ -311,33 +355,20 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
if (dma_mapping_error(ab->dev, paddr))
goto fail_free_skb;
- spin_lock_bh(&dp->rx_desc_lock);
-
- /* Get desc from free list and store in used list
- * for cleanup purposes
- *
- * TODO: pass the removed descs rather than
- * add/read to optimize
- */
- rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
+ rx_desc = list_first_entry_or_null(used_list,
struct ath12k_rx_desc_info,
list);
- if (!rx_desc) {
- spin_unlock_bh(&dp->rx_desc_lock);
+ if (!rx_desc)
goto fail_dma_unmap;
- }
rx_desc->skb = skb;
cookie = rx_desc->cookie;
- list_del(&rx_desc->list);
- list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
-
- spin_unlock_bh(&dp->rx_desc_lock);
desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!desc)
- goto fail_buf_unassign;
+ goto fail_dma_unmap;
+ list_del(&rx_desc->list);
ATH12K_SKB_RXCB(skb)->paddr = paddr;
num_remain--;
@@ -345,26 +376,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
}
- ath12k_hal_srng_access_end(ab, srng);
-
- spin_unlock_bh(&srng->lock);
+ goto out;
- return req_entries - num_remain;
-
-fail_buf_unassign:
- spin_lock_bh(&dp->rx_desc_lock);
- list_del(&rx_desc->list);
- list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
- rx_desc->skb = NULL;
- spin_unlock_bh(&dp->rx_desc_lock);
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
-
+out:
ath12k_hal_srng_access_end(ab, srng);
+ if (!list_empty(used_list))
+ ath12k_dp_rx_enqueue_free(dp, used_list);
+
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
@@ -399,8 +423,6 @@ static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
- ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->tx_mon_buf_ring);
-
return 0;
}
@@ -422,13 +444,12 @@ static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring)
{
- int num_entries;
+ LIST_HEAD(list);
- num_entries = rx_ring->refill_buf_ring.size /
- ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
+ rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
+ ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
- rx_ring->bufs_max = num_entries;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_entries);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
return 0;
}
@@ -454,15 +475,6 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
-
- ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
- &dp->tx_mon_buf_ring,
- HAL_TX_MONITOR_BUF);
- if (ret) {
- ath12k_warn(ab,
- "failed to setup HAL_TX_MONITOR_BUF\n");
- return ret;
- }
}
return 0;
@@ -474,10 +486,8 @@ static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
struct ath12k_base *ab = ar->ab;
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
- ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
- }
}
void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
@@ -521,7 +531,7 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
int ret;
u32 mac_id = dp->mac_id;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ar->ab,
&dp->rxdma_mon_dst_ring[i],
HAL_RXDMA_MONITOR_DST,
@@ -532,17 +542,6 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
"failed to setup HAL_RXDMA_MONITOR_DST\n");
return ret;
}
-
- ret = ath12k_dp_srng_setup(ar->ab,
- &dp->tx_mon_dst_ring[i],
- HAL_TX_MONITOR_DST,
- 0, mac_id + i,
- DP_TX_MONITOR_DEST_RING_SIZE);
- if (ret) {
- ath12k_warn(ar->ab,
- "failed to setup HAL_TX_MONITOR_DST\n");
- return ret;
- }
}
return 0;
@@ -741,15 +740,22 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(lower_32_bits(paddr),
BUFFER_ADDR_INFO0_ADDR);
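With ML peers the queue reference now lives in a second LUT (ml_reoq_lut), selected by the ATH12K_PEER_ML_ID_VALID flag bit carried inside the peer id; once the flag is stripped, both tables are indexed identically as peer_id * (IEEE80211_NUM_TIDS + 1) + tid, i.e. 17 slots per peer (16 TIDs plus one non-QoS entry). A sketch of the index computation; the flag's bit position here is illustrative, the real define is in the driver's peer header:

#include <stdint.h>
#include <stdio.h>

#define IEEE80211_NUM_TIDS	16
#define PEER_ML_ID_VALID	(1u << 13)	/* illustrative flag bit */

/* returns the LUT slot and reports which table to index */
static unsigned int qref_index(uint16_t peer_id, uint8_t tid, int *ml_peer)
{
	*ml_peer = !!(peer_id & PEER_ML_ID_VALID);
	peer_id &= ~PEER_ML_ID_VALID;

	/* 17 slots per peer: TIDs 0..15 plus one non-QoS entry */
	return peer_id * (IEEE80211_NUM_TIDS + 1) + tid;
}

int main(void)
{
	int ml;
	unsigned int idx = qref_index(5 | PEER_ML_ID_VALID, 3, &ml);

	printf("%s LUT, slot %u\n", ml ? "ml_reoq" : "reoq", idx); /* ml_reoq, 88 */
	return 0;
}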
@@ -762,15 +768,22 @@ static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
@@ -803,7 +816,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
rx_tid->vaddr = NULL;
}
- ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
+ else
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
rx_tid->active = false;
}
@@ -941,7 +957,13 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return -ENOENT;
}
- if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ if (ab->hw_params->reoq_lut_support &&
+ (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "reo qref table is not setup\n");
return -EINVAL;
@@ -1022,7 +1044,11 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
*/
- ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr);
+ else
+ ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+
spin_unlock_bh(&ab->base_lock);
} else {
spin_unlock_bh(&ab->base_lock);
@@ -1039,16 +1065,27 @@ err_mem_free:
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
struct ath12k_base *ab = ar->ab;
- struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta);
- int vdev_id = arsta->arvif->vdev_id;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
+ struct ath12k_link_sta *arsta;
+ int vdev_id;
int ret;
- ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (!arsta)
+ return -ENOLINK;
+
+ vdev_id = arsta->arvif->vdev_id;
+
+ ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
params->tid, params->buf_size,
- params->ssn, arsta->pn_type);
+ params->ssn, arsta->ahsta->pn_type);
if (ret)
ath12k_warn(ab, "failed to setup rx tid %d\n", ret);
@@ -1056,18 +1093,29 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
}
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
- struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta);
- int vdev_id = arsta->arvif->vdev_id;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
+ struct ath12k_link_sta *arsta;
+ int vdev_id;
bool active;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (!arsta)
+ return -ENOLINK;
+
+ vdev_id = arsta->arvif->vdev_id;
+
spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
+ peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
@@ -1092,7 +1140,7 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
return ret;
}
-int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
+int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key)
@@ -1270,10 +1318,10 @@ static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
return 0;
}
-static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
- int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
- const void *ptr, void *data),
- void *data)
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
{
const struct htt_tlv *tlv;
const void *begin = ptr;
@@ -1314,7 +1362,8 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ieee80211_sta *sta;
- struct ath12k_sta *arsta;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
struct htt_ppdu_stats_user_rate *user_rate;
struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
@@ -1395,7 +1444,8 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
}
sta = peer->sta;
- arsta = ath12k_sta_to_arsta(sta);
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
@@ -1647,7 +1697,11 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
rcu_read_lock();
ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
- ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+ /* It is possible that the ar is not yet active (started).
+ * The above function will only look for the active pdev
+ * and hence %NULL return is possible. Just silently
+ * discard this message
+ */
goto exit;
}
@@ -1743,6 +1797,7 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
ath12k_htt_pull_ppdu_stats(ab, skb);
break;
case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath12k_debugfs_htt_ext_stats_handler(ab, skb);
break;
case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
ath12k_htt_mlo_offset_event_handler(ab, skb);
@@ -2361,8 +2416,10 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
channel_num = meta_data;
center_freq = meta_data >> 16;
- if (center_freq >= 5935 && center_freq <= 7105) {
+ if (center_freq >= ATH12K_MIN_6G_FREQ &&
+ center_freq <= ATH12K_MAX_6G_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
+ rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
@@ -2380,8 +2437,9 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_desc, sizeof(*rx_desc));
}
- rx_status->freq = ieee80211_channel_to_frequency(channel_num,
- rx_status->band);
+ if (rx_status->band != NL80211_BAND_6GHZ)
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
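Because 6 GHz channel numbers overlap with the 2.4/5 GHz numbering, the band can no longer be inferred from channel_num alone: the code above trusts the reported center frequency whenever it falls in the 6 GHz range and only converts channel number to frequency for the other bands. A condensed sketch of the decision; the bounds follow the literals this patch replaces with ATH12K_MIN_6G_FREQ/ATH12K_MAX_6G_FREQ:

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ, BAND_UNKNOWN };

static enum band classify(unsigned int chan, unsigned int center_freq,
			  unsigned int *freq)
{
	if (center_freq >= 5935 && center_freq <= 7105) {
		*freq = center_freq; /* 6 GHz: trust the frequency directly */
		return BAND_6GHZ;
	}
	if (chan >= 1 && chan <= 14) {
		/* as ieee80211_channel_to_frequency() would compute it
		 * (channel 14 is special-cased in the real helper) */
		*freq = 2407 + chan * 5;
		return BAND_2GHZ;
	}
	if (chan >= 36 && chan <= 173) {
		*freq = 5000 + chan * 5;
		return BAND_5GHZ;
	}
	return BAND_UNKNOWN;
}

int main(void)
{
	unsigned int freq = 0;
	enum band b = classify(1, 5955, &freq); /* 6 GHz channel 1 */

	printf("band %d freq %u\n", b, freq); /* BAND_6GHZ, 5955 */
	return 0;
}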
@@ -2420,10 +2478,15 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
pubsta = peer ? peer->sta : NULL;
+ if (pubsta && pubsta->valid_links) {
+ status->link_valid = 1;
+ status->link_id = peer->link_id;
+ }
+
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -2541,11 +2604,14 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
struct sk_buff_head *msdu_list,
int ring_id)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ieee80211_rx_status rx_status = {0};
struct ath12k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath12k *ar;
- u8 mac_id, pdev_id;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, pdev_id;
int ret;
if (skb_queue_empty(msdu_list))
@@ -2555,15 +2621,18 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
while ((msdu = __skb_dequeue(msdu_list))) {
rxcb = ATH12K_SKB_RXCB(msdu);
- mac_id = rxcb->mac_id;
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
- if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
+ hw_link_id = rxcb->hw_link_id;
+ partner_ab = ath12k_ag_to_ab(ag,
+ hw_links[hw_link_id].device_id);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
+ if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}
@@ -2582,25 +2651,55 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
rcu_read_unlock();
}
+static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
+ enum ath12k_peer_metadata_version ver,
+ __le32 peer_metadata)
+{
+ switch (ver) {
+ default:
+ ath12k_warn(ab, "Unknown peer metadata version: %d", ver);
+ fallthrough;
+ case ATH12K_PEER_METADATA_V0:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V0_PEER_ID);
+ case ATH12K_PEER_METADATA_V1:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1_PEER_ID);
+ case ATH12K_PEER_METADATA_V1A:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
+ case ATH12K_PEER_METADATA_V1B:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
+ }
+}
+
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
struct hal_reo_dest_ring *desc;
- int num_buffs_reaped = 0;
+ struct ath12k_base *partner_ab;
struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
+ u8 hw_link_id, device_id;
struct hal_srng *srng;
struct sk_buff *msdu;
bool done = false;
- int mac_id;
u64 desc_va;
__skb_queue_head_init(&msdu_list);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
spin_lock_bh(&srng->lock);
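The ath12k_dp_rx_get_peer_id() helper added in this hunk makes peer-ID extraction version dependent: the same __le32 peer_metadata word carries the peer id at different bit positions depending on the firmware's metadata version, and an unknown version deliberately falls through to the V0 layout after a warning. A sketch of the dispatch with illustrative bit layouts (the real RX_MPDU_DESC_META_DATA_V* masks are in rx_desc.h):

#include <stdint.h>
#include <stdio.h>

enum meta_ver { META_V0, META_V1, META_V1A, META_V1B };

/* illustrative layouts; only the idea of per-version masks matters */
static uint16_t peer_id_from_meta(enum meta_ver ver, uint32_t meta)
{
	switch (ver) {
	default:	/* unknown: warn in the driver, fall back to V0 */
	case META_V0:
		return meta & 0xffff;
	case META_V1:
		return meta & 0x3fff;
	case META_V1A:
	case META_V1B:
		return meta & 0x1fff;
	}
}

int main(void)
{
	uint32_t meta = 0xabcd1234;

	printf("v0 %u v1 %u\n",
	       peer_id_from_meta(META_V0, meta),
	       peer_id_from_meta(META_V1, meta));
	return 0;
}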
@@ -2609,24 +2708,38 @@ try_again:
ath12k_hal_srng_access_begin(ab, srng);
while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct rx_mpdu_desc *mpdu_info;
+ struct rx_msdu_desc *msdu_info;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
cookie = le32_get_bits(desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_SW_COOKIE);
- mac_id = le32_get_bits(desc->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ hw_link_id = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
le32_to_cpu(desc->buf_va_lo));
desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ if (desc_info->skb) {
+ dev_kfree_skb_any(desc_info->skb);
+ desc_info->skb = NULL;
+ }
+
+ continue;
+ }
+
/* retry manual desc retrieval */
if (!desc_info) {
- desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+ desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+ cookie);
continue;
}
}
@@ -2637,36 +2750,37 @@ try_again:
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&dp->rx_desc_lock);
- list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
- spin_unlock_bh(&dp->rx_desc_lock);
+ list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
+ dma_unmap_single(partner_ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- num_buffs_reaped++;
+ num_buffs_reaped[device_id]++;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+ ab->soc_stats.hal_reo_error[ring_id]++;
continue;
}
- rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ msdu_info = &desc->rx_msdu_info;
+ mpdu_info = &desc->rx_mpdu_info;
+
+ rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
- rxcb->mac_id = mac_id;
- rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
- RX_MPDU_DESC_META_DATA_PEER_ID);
- rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
+ rxcb->hw_link_id = hw_link_id;
+ rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
+ mpdu_info->peer_meta_data);
+ rxcb->tid = le32_get_bits(mpdu_info->info0,
RX_MPDU_DESC_INFO0_TID);
__skb_queue_tail(&msdu_list, msdu);
@@ -2700,7 +2814,17 @@ try_again:
if (!total_msdu_reaped)
goto exit;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
ring_id);
@@ -2740,10 +2864,17 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
peer = ath12k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
return -ENOENT;
}
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
+ return 0;
+ }
+
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
rx_tid->ab = ab;
@@ -2969,9 +3100,10 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct hal_srng *srng;
dma_addr_t link_paddr, buf_paddr;
u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
- u32 cookie, hal_rx_desc_sz, dest_ring_info0;
+ u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
int ret;
struct ath12k_rx_desc_info *desc_info;
+ enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
u8 dst_ind;
hal_rx_desc_sz = ab->hal.hal_desc_sz;
@@ -3005,7 +3137,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, buf_paddr))
return -ENOMEM;
@@ -3021,9 +3153,9 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
}
desc_info->skb = defrag_skb;
+ desc_info->in_use = true;
list_del(&desc_info->list);
- list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
spin_unlock_bh(&dp->rx_desc_lock);
ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
@@ -3049,7 +3181,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
cookie,
- HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
+ idle_link_rbm);
mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
@@ -3061,13 +3193,11 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
reo_ent_ring->rx_mpdu_info.peer_meta_data =
reo_dest_ring->rx_mpdu_info.peer_meta_data;
- /* Firmware expects physical address to be filled in queue_addr_lo in
- * the MLO scenario and in case of non MLO peer meta data needs to be
- * filled.
- * TODO: Need to handle for MLO scenario.
- */
- reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
- reo_ent_ring->info0 = le32_encode_bits(dst_ind,
+ reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr));
+ queue_addr_hi = upper_32_bits(rx_tid->paddr);
+ reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
+ HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
+ le32_encode_bits(dst_ind,
HAL_REO_ENTR_RING_INFO0_DEST_IND);
reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
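The reinject path above now programs the REO entrance ring with the actual RX TID queue address rather than the peer metadata: the 64-bit DMA address is split, with the low half going to queue_addr_lo and the high bits packed into info0 next to the destination indication. The split-and-encode step, sketched with illustrative field widths (the real HAL_REO_ENTR_RING_INFO0_* masks are in hal_desc.h):

#include <stdint.h>
#include <stdio.h>

/* illustrative packing of the info0 word */
#define INFO0_QUEUE_ADDR_HI(v)	((uint32_t)((v) & 0xff))
#define INFO0_DEST_IND(v)	(((uint32_t)(v) & 0x1f) << 8)

int main(void)
{
	uint64_t paddr = 0x000000012345f000ull;          /* rx_tid->paddr */
	uint32_t queue_addr_lo = (uint32_t)paddr;        /* lower_32_bits() */
	uint32_t queue_addr_hi = (uint32_t)(paddr >> 32); /* upper_32_bits() */
	uint32_t info0 = INFO0_QUEUE_ADDR_HI(queue_addr_hi) |
			 INFO0_DEST_IND(6);

	printf("lo 0x%08x info0 0x%08x\n", queue_addr_lo, info0);
	return 0;
}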
@@ -3085,13 +3215,13 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
err_free_desc:
spin_lock_bh(&dp->rx_desc_lock);
- list_del(&desc_info->list);
- list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
+ desc_info->in_use = false;
desc_info->skb = NULL;
+ list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
spin_unlock_bh(&dp->rx_desc_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return ret;
}
@@ -3304,6 +3434,7 @@ out_unlock:
static int
ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
+ struct list_head *used_list,
bool drop, u32 cookie)
{
struct ath12k_base *ab = ar->ab;
@@ -3323,7 +3454,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
+ cookie);
return -EINVAL;
}
}
@@ -3333,9 +3465,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&ab->dp.rx_desc_lock);
- list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
- spin_unlock_bh(&ab->dp.rx_desc_lock);
+
+ list_add_tail(&desc_info->list, used_list);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ar->ab->dev, rxcb->paddr,
@@ -3353,7 +3484,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
goto exit;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
goto exit;
}
@@ -3383,7 +3514,10 @@ exit:
int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
int budget)
{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
struct hal_rx_msdu_link *link_desc_va;
@@ -3391,22 +3525,24 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
struct hal_reo_dest_ring *reo_desc;
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, device_id;
u32 desc_bank, num_msdus;
struct hal_srng *srng;
- struct ath12k_dp *dp;
- int mac_id;
struct ath12k *ar;
dma_addr_t paddr;
bool is_frag;
- bool drop = false;
+ bool drop;
int pdev_id;
tot_n_bufs_reaped = 0;
quota = budget;
- dp = &ab->dp;
- reo_except = &dp->reo_except_ring;
- link_desc_banks = dp->link_desc_banks;
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
+ reo_except = &ab->dp.reo_except_ring;
srng = &ab->hal.srng_list[reo_except->ring_id];
@@ -3416,7 +3552,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
while (budget &&
(reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ drop = false;
ab->soc_stats.err_ring_pkts++;
+
ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
&desc_bank);
if (ret) {
@@ -3424,16 +3562,27 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
ret);
continue;
}
+
+ hw_link_id = le32_get_bits(reo_desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
+
+ link_desc_banks = partner_ab->dp.link_desc_banks;
link_desc_va = link_desc_banks[desc_bank].vaddr +
(paddr - link_desc_banks[desc_bank].paddr);
ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
- if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
+ if (rbm != partner_ab->dp.idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
- rbm != ab->hw_params->hal_params->rx_buf_rbm) {
+ rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
- ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
HAL_WBM_REL_BM_ACT_REL_MSDU);
continue;
}
@@ -3443,24 +3592,26 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
/* Process only rx fragments with one msdu per link desc below, and drop
* msdu's indicated due to error reasons.
+ * Dynamic fragmentation not supported in Multi-link client, so drop the
+ * partner device buffers.
*/
- if (!is_frag || num_msdus > 1) {
+ if (!is_frag || num_msdus > 1 ||
+ partner_ab->device_id != ab->device_id) {
drop = true;
+
/* Return the link desc back to wbm idle list */
- ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
for (i = 0; i < num_msdus; i++) {
- mac_id = le32_get_bits(reo_desc->info0,
- HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
-
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
-
- if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
- msdu_cookies[i]))
+ if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
+ &rx_desc_used_list[device_id],
+ drop,
+ msdu_cookies[i])) {
+ num_buffs_reaped[device_id]++;
tot_n_bufs_reaped++;
+ }
}
if (tot_n_bufs_reaped >= quota) {
@@ -3476,9 +3627,17 @@ exit:
spin_unlock_bh(&srng->lock);
- rx_ring = &dp->rx_refill_buf_ring;
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, tot_n_bufs_reaped);
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
return tot_n_bufs_reaped;
}
@@ -3695,25 +3854,33 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
+ struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
- struct sk_buff_head msdu_list;
+ struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- u8 mac_id;
- int num_buffs_reaped = 0;
+ int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int total_num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_hw_link *hw_links = ag->hw_links;
+ struct ath12k_base *partner_ab;
+ u8 hw_link_id, device_id;
int ret, pdev_id;
+ struct hal_rx_desc *msdu_data;
__skb_queue_head_init(&msdu_list);
+ __skb_queue_head_init(&scatter_msdu_list);
- srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
- rx_ring = &dp->rx_refill_buf_ring;
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+ srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
@@ -3737,7 +3904,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
+ err_info.cookie);
continue;
}
}
@@ -3748,16 +3916,27 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&dp->rx_desc_lock);
- list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
- spin_unlock_bh(&dp->rx_desc_lock);
+ device_id = desc_info->device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ dev_kfree_skb_any(msdu);
+
+ /* In case the continuation bit was set in the
+ * previous record, clean up scatter_msdu_list
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+ continue;
+ }
+
+ list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
+ dma_unmap_single(partner_ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- num_buffs_reaped++;
+ num_buffs_reaped[device_id]++;
+ total_num_buffs_reaped++;
if (!err_info.continuation)
budget--;
@@ -3768,39 +3947,97 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
continue;
}
+ msdu_data = (struct hal_rx_desc *)msdu->data;
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
- rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
-
- __skb_queue_tail(&msdu_list, msdu);
-
rxcb->is_first_msdu = err_info.first_msdu;
rxcb->is_last_msdu = err_info.last_msdu;
rxcb->is_continuation = err_info.continuation;
+ rxcb->rx_desc = msdu_data;
+
+ if (err_info.continuation) {
+ __skb_queue_tail(&scatter_msdu_list, msdu);
+ continue;
+ }
+
+ hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
+ msdu_data);
+ if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
+ dev_kfree_skb_any(msdu);
+
+ /* In case the continuation bit was set in the
+ * previous record, clean up scatter_msdu_list
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+ continue;
+ }
+
+ if (!skb_queue_empty(&scatter_msdu_list)) {
+ struct sk_buff *msdu;
+
+ skb_queue_walk(&scatter_msdu_list, msdu) {
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->hw_link_id = hw_link_id;
+ }
+
+ skb_queue_splice_tail_init(&scatter_msdu_list,
+ &msdu_list);
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->hw_link_id = hw_link_id;
+ __skb_queue_tail(&msdu_list, msdu);
}
+ /* In case the continuation bit was set in the
+ * last record, clean up scatter_msdu_list
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
- if (!num_buffs_reaped)
+ if (!total_num_buffs_reaped)
goto done;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
+ for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ if (!num_buffs_reaped[device_id])
+ continue;
+
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ &rx_desc_used_list[device_id],
+ num_buffs_reaped[device_id]);
+ }
rcu_read_lock();
while ((msdu = __skb_dequeue(&msdu_list))) {
- mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
- (struct hal_rx_desc *)msdu->data);
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- ar = ab->pdevs[pdev_id].ar;
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ hw_link_id = rxcb->hw_link_id;
+
+ device_id = hw_links[hw_link_id].device_id;
+ partner_ab = ath12k_ag_to_ab(ag, device_id);
+ if (unlikely(!partner_ab)) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
+ hw_link_id, device_id);
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ hw_links[hw_link_id].pdev_idx);
+ ar = partner_ab->pdevs[pdev_id].ar;
- if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) {
dev_kfree_skb_any(msdu);
continue;
}
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}
@@ -3808,7 +4045,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
rcu_read_unlock();
done:
- return num_buffs_reaped;
+ return total_num_buffs_reaped;
}
void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
@@ -3830,7 +4067,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
ath12k_hal_srng_access_begin(ab, srng);
while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
- tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
+ tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
switch (tag) {
case HAL_REO_GET_QUEUE_STATS_STATUS:
@@ -3897,7 +4134,7 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
if (ab->hw_params->rx_mac_buf_ring)
ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
}
@@ -3906,7 +4143,6 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
- ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
ath12k_dp_rxdma_buf_free(ab);
}
@@ -3964,7 +4200,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
- int ret;
+ int ret = 0;
u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
int i;
@@ -3990,7 +4226,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
* and modify the rx_desc struct
*/
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
HAL_RXDMA_BUF,
@@ -4017,7 +4253,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
}
if (ab->hw_params->rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
i, HAL_RXDMA_BUF);
@@ -4049,15 +4285,6 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
ret);
return ret;
}
-
- ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
- ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
- 0, HAL_TX_MONITOR_BUF);
- if (ret) {
- ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
- ret);
- return ret;
- }
}
ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
@@ -4077,9 +4304,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
- idr_init(&dp->tx_mon_buf_ring.bufs_idr);
- spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
-
ret = ath12k_dp_srng_setup(ab,
&dp->rx_refill_buf_ring.refill_buf_ring,
HAL_RXDMA_BUF, 0, 0,
@@ -4090,7 +4314,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
}
if (ab->hw_params->rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
@@ -4122,15 +4346,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
-
- ret = ath12k_dp_srng_setup(ab,
- &dp->tx_mon_buf_ring.refill_buf_ring,
- HAL_TX_MONITOR_BUF, 0, 0,
- DP_TX_MONITOR_BUF_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n");
- return ret;
- }
}
ret = ath12k_dp_rxdma_buf_setup(ab);
@@ -4159,7 +4374,7 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
return ret;
}
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i,
@@ -4170,17 +4385,6 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
i, ret);
return ret;
}
-
- ring_id = dp->tx_mon_dst_ring[i].ring_id;
- ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
- mac_id + i,
- HAL_TX_MONITOR_DST);
- if (ret) {
- ath12k_warn(ab,
- "failed to configure tx_mon_dst_ring %d %d\n",
- i, ret);
- return ret;
- }
}
out:
return 0;
@@ -4224,29 +4428,3 @@ int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
return 0;
}
-
-int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
-{
- /* start reap timer */
- mod_timer(&ab->mon_reap_timer,
- jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
-
- return 0;
-}
-
-int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
-{
- int ret;
-
- if (stop_timer)
- del_timer_sync(&ab->mon_reap_timer);
-
- /* reap all the monitor related rings */
- ret = ath12k_dp_purge_mon_ring(ab);
- if (ret) {
- ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
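Both reworked error paths above follow the same multi-device bookkeeping:
reaped descriptors go onto a per-device used list, reap counts are kept per
device, and each partner device's refill ring is replenished from its own
list. A minimal sketch of that pattern, reusing names from the hunks above
(illustration only, not part of the patch):

	struct ath12k_hw_group *ag = ab->ag;
	struct list_head used_list[ATH12K_MAX_SOCS];
	int num_reaped[ATH12K_MAX_SOCS] = {};
	struct ath12k_base *partner_ab;
	u8 device_id;

	for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
		INIT_LIST_HEAD(&used_list[device_id]);

	/* reap loop: move each descriptor to used_list[desc->device_id]
	 * and bump num_reaped[desc->device_id]
	 */

	for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
		if (!num_reaped[device_id])
			continue;

		partner_ab = ath12k_ag_to_ab(ag, device_id);
		ath12k_dp_rx_bufs_replenish(partner_ab,
					    &partner_ab->dp.rx_refill_buf_ring,
					    &used_list[device_id],
					    num_reaped[device_id]);
	}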
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 05b3d5581dbe..1ce82088c954 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_RX_H
#define ATH12K_DP_RX_H
@@ -85,10 +85,12 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
- struct ieee80211_ampdu_params *params);
+ struct ieee80211_ampdu_params *params,
+ u8 link_id);
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
- struct ieee80211_ampdu_params *params);
-int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
+ struct ieee80211_ampdu_params *params,
+ u8 link_id);
+int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key);
@@ -118,12 +120,11 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id,
int budget);
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring,
+ struct list_head *used_list,
int req_entries);
int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar);
int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id);
-int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab);
-int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer);
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc);
struct ath12k_peer *
@@ -140,4 +141,8 @@ ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data);
#endif /* ATH12K_DP_RX_H */
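The newly declared ath12k_dp_htt_tlv_iter() walks an HTT TLV buffer and hands
each tag/length/payload to a callback. A hypothetical callback for
illustration only (the tag handling and the buf/buf_len variables are made
up, and the stop-on-non-zero behavior is assumed from similar iterators):

	static int example_tlv_cb(struct ath12k_base *ab, u16 tag, u16 len,
				  const void *ptr, void *data)
	{
		u32 *count = data;

		(*count)++;	/* e.g. just count the TLVs seen */
		return 0;	/* non-zero presumably stops the walk */
	}

	/* in some handler, with buf/buf_len in scope: */
	u32 n = 0;

	ret = ath12k_dp_htt_tlv_iter(ab, buf, buf_len, example_tlv_cb, &n);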
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 572b87153647..a8d341a6df01 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -10,7 +10,7 @@
#include "hw.h"
static enum hal_tcl_encap_type
-ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb)
+ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath12k_base *ab = arvif->ar->ab;
@@ -124,7 +124,99 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
}
-int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+#define HTT_META_DATA_ALIGNMENT 0x8
+
+static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
+{
+ struct sk_buff *tail;
+ void *metadata;
+
+ if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0))
+ return NULL;
+
+ metadata = pskb_put(skb, tail, tail_len);
+ memset(metadata, 0, tail_len);
+ return metadata;
+}
+
+/* Prepare HTT metadata when used with an ext MSDU */
+static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
+{
+ struct hal_tx_msdu_metadata *desc_ext;
+ u8 htt_desc_size;
+ /* Size rounded up to a multiple of 8 bytes */
+ u8 htt_desc_size_aligned;
+
+ htt_desc_size = sizeof(struct hal_tx_msdu_metadata);
+ htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
+
+ desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
+ if (!desc_ext)
+ return -ENOMEM;
+
+ desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) |
+ le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) |
+ le32_encode_bits(1,
+ HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL);
+
+ return 0;
+}
+
+static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
+ unsigned long delta,
+ bool head)
+{
+ unsigned long len = skb->len;
+
+ if (head) {
+ skb_push(skb, delta);
+ memmove(skb->data, skb->data + delta, len);
+ skb_trim(skb, len);
+ } else {
+ skb_put(skb, delta);
+ memmove(skb->data + delta, skb->data, len);
+ skb_pull(skb, delta);
+ }
+}
+
+static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
+ struct sk_buff **pskb)
+{
+ u32 iova_mask = ab->hw_params->iova_mask;
+ unsigned long offset, delta1, delta2;
+ struct sk_buff *skb2, *skb = *pskb;
+ unsigned int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ int ret = 0;
+
+ offset = (unsigned long)skb->data & iova_mask;
+ delta1 = offset;
+ delta2 = iova_mask - offset + 1;
+
+ if (headroom >= delta1) {
+ ath12k_dp_tx_move_payload(skb, delta1, true);
+ } else if (tailroom >= delta2) {
+ ath12k_dp_tx_move_payload(skb, delta2, false);
+ } else {
+ skb2 = skb_realloc_headroom(skb, iova_mask);
+ if (!skb2) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ offset = (unsigned long)skb2->data & iova_mask;
+ if (offset)
+ ath12k_dp_tx_move_payload(skb2, offset, true);
+ *pskb = skb2;
+ }
+
+out:
+ return ret;
+}
+
+int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct sk_buff *skb)
{
struct ath12k_base *ab = ar->ab;
@@ -138,6 +230,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
struct sk_buff *skb_ext_desc;
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath12k_vif *ahvif = arvif->ahvif;
struct dp_tx_ring *tx_ring;
u8 pool_id;
u8 hal_ring_id;
@@ -145,6 +238,8 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
u8 ring_selector, ring_map = 0;
bool tcl_ring_retry;
bool msdu_ext_desc = false;
+ bool add_htt_metadata = false;
+ u32 iova_mask = ab->hw_params->iova_mask;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
@@ -180,7 +275,7 @@ tcl_ring_sel:
ti.bank_id = arvif->bank_id;
ti.meta_data_flags = arvif->tcl_metadata;
- if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+ if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
ti.encrypt_type =
@@ -240,6 +335,23 @@ tcl_ring_sel:
goto fail_remove_tx_buf;
}
+ if (iova_mask &&
+ (unsigned long)skb->data & iova_mask) {
+ ret = ath12k_dp_tx_align_payload(ab, &skb);
+ if (ret) {
+ ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
+ /* don't bail out; give the original buffer
+ * a chance even if unaligned.
+ */
+ goto map;
+ }
+
+ /* hdr points to the wrong place after alignment,
+ * so refresh it for later use.
+ */
+ hdr = (void *)skb->data;
+ }
+map:
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
@@ -248,12 +360,24 @@ tcl_ring_sel:
goto fail_remove_tx_buf;
}
+ if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+ !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ /* Add metadata for sw encrypted vlan group traffic */
+ add_htt_metadata = true;
+ msdu_ext_desc = true;
+ ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
+ ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+
tx_desc->skb = skb;
tx_desc->mac_id = ar->pdev_idx;
ti.desc_id = tx_desc->desc_id;
ti.data_len = skb->len;
skb_cb->paddr = ti.paddr;
- skb_cb->vif = arvif->vif;
+ skb_cb->vif = ahvif->vif;
skb_cb->ar = ar;
if (msdu_ext_desc) {
@@ -269,6 +393,15 @@ tcl_ring_sel:
msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
+ if (add_htt_metadata) {
+ ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc);
+ if (ret < 0) {
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ "Failed to add HTT meta data, dropping packet\n");
+ goto fail_unmap_dma;
+ }
+ }
+
ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
skb_ext_desc->len, DMA_TO_DEVICE);
ret = dma_mapping_error(ab->dev, ti.paddr);
@@ -352,15 +485,15 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
skb_cb = ATH12K_SKB_CB(msdu);
+ ar = ab->pdevs[pdev_id].ar;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc)
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ar->ah->hw, msdu);
- ar = ab->pdevs[pdev_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
}
@@ -393,8 +526,12 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+
info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
@@ -414,7 +551,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
struct ath12k_dp_htt_wbm_tx_status ts = {0};
enum hal_wbm_htt_tx_comp_status wbm_status;
- status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+ status_desc = desc;
wbm_status = le32_get_bits(status_desc->info0,
HTT_TX_WBM_COMP_INFO0_STATUS);
@@ -448,6 +585,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct hal_tx_status *ts)
{
struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ar->ah;
struct ieee80211_tx_info *info;
struct ath12k_skb_cb *skb_cb;
@@ -466,12 +604,12 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ah->hw, msdu);
goto exit;
}
if (!skb_cb->vif) {
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ah->hw, msdu);
goto exit;
}
@@ -481,17 +619,39 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
/* skip tx rate update from ieee80211_status*/
info->status.rates[0].idx = -1;
- if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
- !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
- info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
- }
+ switch (ts->status) {
+ case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ts->ack_rssi;
- if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
- (info->flags & IEEE80211_TX_CTL_NO_ACK))
- info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+ break;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ break;
+ }
+ fallthrough;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
+ case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
+ /* The failure status is due to an internal firmware tx
+ * failure, hence drop the frame; do not report the frame's
+ * status to the upper layer
+ */
+ ieee80211_free_txskb(ah->hw, msdu);
+ goto exit;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
+ ts->status);
+ break;
+ }
/* NOTE: Tx rate status reporting. Tx completion status does not have
* necessary information (for example nss) to build the tx rate.
@@ -669,14 +829,6 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
- case HAL_TX_MONITOR_BUF:
- *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
- break;
- case HAL_TX_MONITOR_DST:
- *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
- *htt_ring_type = HTT_HW_TO_SW_RING;
- break;
default:
ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
ret = -EINVAL;
@@ -854,7 +1006,7 @@ int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
int ret;
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
@@ -1007,6 +1159,7 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
struct htt_ext_stats_cfg_cmd *cmd;
int len = sizeof(*cmd);
int ret;
+ u32 pdev_id;
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
@@ -1018,7 +1171,8 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
- cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+ pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ cmd->hdr.pdev_mask = 1 << pdev_id;
cmd->hdr.stats_type = type;
cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
@@ -1044,13 +1198,7 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
struct ath12k_base *ab = ar->ab;
int ret;
- ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
- if (ret) {
- ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
- return ret;
- }
-
- ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
+ ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
if (ret) {
ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
return ret;
@@ -1209,31 +1357,3 @@ err_free:
dev_kfree_skb_any(skb);
return ret;
}
-
-int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct htt_tx_ring_tlv_filter tlv_filter = {0};
- int ret, ring_id;
-
- ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
-
- /* TODO: Need to set upstream/downstream tlv filters
- * here
- */
-
- if (ab->hw_params->rxdma1_enable) {
- ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0,
- HAL_TX_MONITOR_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
- if (ret) {
- ath12k_err(ab,
- "failed to setup filter for monitor buf %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
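The alignment math in ath12k_dp_tx_align_payload() above is easiest to see
with numbers. Assuming iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1 = 0x7f
(the WCN7850 value set later in this patch):

	offset = (unsigned long)skb->data & 0x7f;	/* say 0x34 */
	delta1 = offset;				/* 0x34: shift towards head */
	delta2 = 0x7f - offset + 1;			/* 0x4c: shift towards tail */

Either move leaves skb->data on a 128-byte boundary; headroom absorbs
delta1, tailroom absorbs delta2, and only if neither fits is the skb
reallocated.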
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
index 436d77e5e9ee..46dce23501f3 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_TX_H
@@ -12,11 +12,11 @@
struct ath12k_dp_htt_wbm_tx_status {
bool acked;
- int ack_rssi;
+ s8 ack_rssi;
};
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
-int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct sk_buff *skb);
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
@@ -36,6 +36,5 @@ int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type,
int tx_buf_size,
struct htt_tx_ring_tlv_filter *htt_tlv_filter);
-int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/fw.h b/drivers/net/wireless/ath/ath12k/fw.h
index 3ff041f15fa0..273c003eff3b 100644
--- a/drivers/net/wireless/ath/ath12k/fw.h
+++ b/drivers/net/wireless/ath/ath12k/fw.h
@@ -23,6 +23,9 @@ enum ath12k_fw_features {
*/
ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0,
+ /* The firmware supports MLO capability */
+ ATH12K_FW_FEATURE_MLO,
+
/* keep last */
ATH12K_FW_FEATURE_COUNT,
};
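The new flag is meant to be tested like any other firmware feature bit; a
sketch, assuming the driver's usual feature bitmap location
(ab->fw.fw_features, as used elsewhere in ath12k):

	if (test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features))
		/* firmware can do MLO; enable multi-link setup */;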
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index 78310da8cfe8..cd59ff8e6c7b 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -181,7 +181,7 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
},
[HAL_TX_MONITOR_BUF] = {
- .start_ring_id = HAL_SRNG_SW2TXMON_BUF0,
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
.max_rings = 1,
.entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_PMAC,
@@ -385,13 +385,13 @@ static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
- RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP);
+ RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
{
return le16_get_bits(desc->u.qcn9274.msdu_end.info5,
- RX_MSDU_END_QCN9274_INFO5_TID);
+ RX_MSDU_END_INFO5_TID);
}
static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
@@ -846,13 +846,13 @@ static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct hal_rx_desc
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
- RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP);
+ RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
{
return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
- RX_MSDU_END_QCN9274_INFO5_TID);
+ RX_MSDU_END_INFO5_TID);
}
static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
@@ -1198,7 +1198,7 @@ static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
- RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP);
+ RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
@@ -1216,7 +1216,7 @@ static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end,
- sizeof(struct rx_msdu_end_wcn7850));
+ sizeof(struct rx_msdu_end_qcn9274));
}
static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
@@ -1969,14 +1969,15 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc
}
void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
- dma_addr_t paddr)
+ dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm)
{
desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK),
BUFFER_ADDR_INFO0_ADDR);
desc->buf_addr_info.info1 =
le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
BUFFER_ADDR_INFO1_ADDR) |
- le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+ le32_encode_bits(rbm, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
}
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index 107927d64bbb..94e2e8735958 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -485,8 +485,8 @@ enum hal_srng_ring_id {
HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
- HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
+ HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
HAL_SRNG_RING_ID_PMAC1_ID_END,
};
@@ -767,15 +767,15 @@ struct hal_srng_config {
};
/**
- * enum hal_rx_buf_return_buf_manager
+ * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers
*
* @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
- * @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 0 WBM is chosen in case of a multi-chip config
- * @HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 1 WBM is chosen in case of a multi-chip config
- * @HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 2 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 0 WBM is chosen in case of a multi-device config
+ * @HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 1 WBM is chosen in case of a multi-device config
+ * @HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 2 WBM is chosen in case of a multi-device config
* @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
* @HAL_RX_BUF_RBM_SW0_BM: For ring 0 -- returned to host
* @HAL_RX_BUF_RBM_SW1_BM: For ring 1 -- returned to host
@@ -788,9 +788,9 @@ struct hal_srng_config {
enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST,
HAL_RX_BUF_RBM_FW_BM,
HAL_RX_BUF_RBM_SW0_BM,
HAL_RX_BUF_RBM_SW1_BM,
@@ -1113,7 +1113,8 @@ dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
struct hal_srng *srng);
void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
- dma_addr_t paddr);
+ dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm);
u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type);
void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
u32 len, u32 id, u8 byte_swap_data);
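With the extra parameter, callers choose the return buffer manager per
owning device instead of the previously hardcoded value of 1; e.g. the
per-device idle-link RBM used in the dp_rx.c hunks above (a sketch):

	ath12k_hal_set_link_desc_addr(desc, cookie, paddr,
				      ab->dp.idle_link_rbm);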
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 63340256d3f6..7b0403d245e5 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -522,7 +522,7 @@ enum hal_tlv_tag {
HAL_PHYRXHT_SIG_USR_SU = 468 /* 0x1d4 */,
HAL_PHYRXHT_SIG_USR_MU_MIMO = 469 /* 0x1d5 */,
HAL_PHYRX_GENERIC_U_SIG = 470 /* 0x1d6 */,
- HAL_PHYRX_GENERICHT_SIG = 471 /* 0x1d7 */,
+ HAL_PHYRX_GENERIC_EHT_SIG = 471 /* 0x1d7 */,
HAL_OVERWRITE_RESP_START = 472 /* 0x1d8 */,
HAL_OVERWRITE_RESP_PREAMBLE_INFO = 473 /* 0x1d9 */,
HAL_OVERWRITE_RESP_FRAME_INFO = 474 /* 0x1da */,
@@ -579,9 +579,11 @@ struct hal_tlv_hdr {
#define HAL_TLV_64_HDR_TAG GENMASK(9, 1)
#define HAL_TLV_64_HDR_LEN GENMASK(21, 10)
+#define HAL_TLV_64_USR_ID GENMASK(31, 26)
+#define HAL_TLV_64_ALIGN 8
struct hal_tlv_64_hdr {
- u64 tl;
+ __le64 tl;
u8 value[];
} __packed;
@@ -597,8 +599,30 @@ struct hal_tlv_64_hdr {
#define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27)
#define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28)
-/* TODO revisit after meta data is concluded */
-#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+/* Peer Metadata classification */
+
+/* Version 0 */
+#define RX_MPDU_DESC_META_DATA_V0_PEER_ID GENMASK(15, 0)
+#define RX_MPDU_DESC_META_DATA_V0_VDEV_ID GENMASK(23, 16)
+
+/* Version 1 */
+#define RX_MPDU_DESC_META_DATA_V1_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1_LOGICAL_LINK_ID GENMASK(15, 14)
+#define RX_MPDU_DESC_META_DATA_V1_VDEV_ID GENMASK(23, 16)
+#define RX_MPDU_DESC_META_DATA_V1_LMAC_ID GENMASK(25, 24)
+#define RX_MPDU_DESC_META_DATA_V1_DEVICE_ID GENMASK(28, 26)
+
+/* Version 1A */
+#define RX_MPDU_DESC_META_DATA_V1A_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1A_VDEV_ID GENMASK(21, 14)
+#define RX_MPDU_DESC_META_DATA_V1A_LOGICAL_LINK_ID GENMASK(25, 22)
+#define RX_MPDU_DESC_META_DATA_V1A_DEVICE_ID GENMASK(28, 26)
+
+/* Version 1B */
+#define RX_MPDU_DESC_META_DATA_V1B_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1B_VDEV_ID GENMASK(21, 14)
+#define RX_MPDU_DESC_META_DATA_V1B_HW_LINK_ID GENMASK(25, 22)
+#define RX_MPDU_DESC_META_DATA_V1B_DEVICE_ID GENMASK(28, 26)
struct rx_mpdu_desc {
__le32 info0; /* %RX_MPDU_DESC_INFO */
@@ -2048,6 +2072,19 @@ struct hal_wbm_release_ring {
* fw with fw_reason2.
* @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
* fw with fw_reason3.
+ * @HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE: Remove command initiated by
+ * fw with disable queue.
+ * @HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING: Remove command initiated by
+ * fw to remove all MPDUs until the first non-match.
+ * @HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: Dropped due to drop threshold
+ * criteria
+ * @HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL: Dropped due to link desc
+ * not available
+ * @HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU: Dropped due to drop bit set or
+ * null flow
+ * @HAL_WBM_TQM_REL_REASON_MULTICAST_DROP: Dropped due to mcast drop set for VDEV
+ * @HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP: Dropped due to
+ * 'TCL_drop_reason' being set for the VDEV
*/
enum hal_wbm_tqm_rel_reason {
HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
@@ -2058,6 +2095,13 @@ enum hal_wbm_tqm_rel_reason {
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+ HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE,
+ HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING,
+ HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD,
+ HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL,
+ HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU,
+ HAL_WBM_TQM_REL_REASON_MULTICAST_DROP,
+ HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP,
};
struct hal_wbm_buffer_ring {
@@ -2964,4 +3008,29 @@ struct hal_mon_dest_desc {
* updated by SRNG.
*/
+#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG BIT(8)
+#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE GENMASK(16, 15)
+#define HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL BIT(31)
+
+struct hal_tx_msdu_metadata {
+ __le32 info0;
+ __le32 rsvd0[6];
+} __packed;
+
+/* hal_tx_msdu_metadata
+ * valid_encrypt_type
+ * if set, encrypt type is valid
+ * encrypt_type
+ * 0 = NO_ENCRYPT,
+ * 1 = ENCRYPT,
+ * 2 ~ 3 - Reserved
+ * host_tx_desc_pool
+ * If set, Firmware allocates tx_descriptors
+ * in WAL_BUFFERID_TX_HOST_DATA_EXP, instead
+ * of WAL_BUFFERID_TX_TCL_DATA_EXP.
+ * Use cases:
+ * Any time firmware uses TQM-BYPASS for a data
+ * TID, firmware expects the host to set this bit.
+ */
+
#endif /* ATH12K_HAL_DESC_H */
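The versioned peer-metadata layouts above are plain bitfields in a 32-bit
word; a hypothetical decode helper for the v1 layout, using the kernel's
u32_get_bits() from <linux/bitfield.h> (illustration only, not part of the
patch):

	static inline void rx_peer_meta_v1_decode(u32 meta, u16 *peer_id,
						  u8 *vdev_id, u8 *device_id)
	{
		*peer_id = u32_get_bits(meta, RX_MPDU_DESC_META_DATA_V1_PEER_ID);
		*vdev_id = u32_get_bits(meta, RX_MPDU_DESC_META_DATA_V1_VDEV_ID);
		*device_id = u32_get_bits(meta, RX_MPDU_DESC_META_DATA_V1_DEVICE_ID);
	}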
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index f7c1aaa3b5d4..ac17d6223fa7 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -26,8 +26,8 @@ static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv,
{
struct hal_reo_get_queue_stats *desc;
- tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_get_queue_stats *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
@@ -59,8 +59,8 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
hal->current_blk_index = avail_slot;
}
- tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_flush_cache *)tlv->value;
memset_startat(desc, 0, cache_addr_lo);
@@ -97,8 +97,8 @@ static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv,
{
struct hal_reo_update_rx_queue *desc;
- tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_update_rx_queue *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
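Since struct hal_tlv_64_hdr.tl is now __le64 (see the hal_desc.h hunk
earlier in this patch), both sides of the TLV header must use the 64-bit
accessors; a sketch of the round trip, with the masks as used in the hunks
in this patch:

	tlv->tl = le64_encode_bits(tag, HAL_TLV_HDR_TAG) |
		  le64_encode_bits(len, HAL_TLV_HDR_LEN);	/* write side */

	tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);	/* read side,
								 * as in dp_rx.c above */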
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index 095216eabc01..b08aa2e79f41 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_RX_H
@@ -19,7 +19,7 @@ struct hal_rx_wbm_rel_info {
bool hw_cc_done;
};
-#define HAL_INVALID_PEERID 0xffff
+#define HAL_INVALID_PEERID 0x3fff
#define VHT_SIG_SU_NSS_MASK 0x7
#define HAL_RX_MAX_MCS 12
@@ -156,6 +156,7 @@ struct hal_rx_mon_ppdu_info {
u32 preamble_type;
u32 mpdu_len;
u16 chan_num;
+ u16 freq;
u16 tcp_msdu_count;
u16 tcp_ack_msdu_count;
u16 udp_msdu_count;
@@ -232,21 +233,27 @@ struct hal_rx_mon_ppdu_info {
u8 medium_prot_type;
};
-#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
+#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
+#define HAL_RX_PPDU_START_INFO1_CHAN_NUM GENMASK(15, 0)
+#define HAL_RX_PPDU_START_INFO1_CHAN_FREQ GENMASK(31, 16)
struct hal_rx_ppdu_start {
__le32 info0;
- __le32 chan_num;
- __le32 ppdu_start_ts;
+ __le32 info1;
+ __le32 ppdu_start_ts_31_0;
+ __le32 ppdu_start_ts_63_32;
+ __le32 rsvd[2];
} __packed;
-#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16)
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID GENMASK(13, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_DEVICE_ID GENMASK(15, 14)
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(26, 16)
-#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0)
-#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9)
-#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10)
-#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11)
-#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(10, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(11)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(12)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(13)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(24, 21)
#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16)
@@ -262,8 +269,8 @@ struct hal_rx_ppdu_start {
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
-#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT GENMASK(24, 0)
-#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0)
struct hal_rx_ppdu_end_user_stats {
__le32 rsvd0[2];
@@ -278,9 +285,9 @@ struct hal_rx_ppdu_end_user_stats {
__le32 usr_resp_ref;
__le32 info6;
__le32 rsvd3[4];
- __le32 mpdu_ok_cnt;
+ __le32 info7;
__le32 rsvd4;
- __le32 mpdu_err_cnt;
+ __le32 info8;
__le32 rsvd5[2];
__le32 usr_resp_ref_ext;
__le32 rsvd6;
@@ -294,6 +301,7 @@ struct hal_rx_ppdu_end_user_stats_ext {
__le32 info4;
__le32 info5;
__le32 info6;
+ __le32 rsvd;
} __packed;
#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
@@ -390,11 +398,9 @@ struct hal_rx_he_sig_a_su_info {
#define HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION BIT(25)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION GENMASK(6, 0)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO1_CODING BIT(7)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA BIT(11)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC BIT(12)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXBF BIT(10)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM BIT(15)
@@ -420,7 +426,7 @@ struct hal_rx_he_sig_b2_mu_info {
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
-#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(14)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
@@ -436,30 +442,35 @@ enum hal_rx_ul_reception_type {
HAL_RECEPTION_TYPE_FRAMELESS
};
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB GENMASK(15, 8)
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION GENMASK(3, 0)
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0)
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5)
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
struct hal_rx_phyrx_rssi_legacy_info {
- __le32 rsvd[35];
__le32 info0;
+ __le32 rsvd0[39];
+ __le32 info1;
+ __le32 rsvd1;
} __packed;
-#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
-#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16)
-#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0)
+#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
+#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(29, 16)
+#define HAL_RX_MPDU_START_INFO1_DEVICE_ID GENMASK(31, 30)
+#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0)
struct hal_rx_mpdu_start {
+ __le32 rsvd0[9];
__le32 info0;
__le32 info1;
- __le32 rsvd1[11];
+ __le32 rsvd1[2];
__le32 info2;
- __le32 rsvd2[9];
+ __le32 rsvd2[16];
} __packed;
#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
struct hal_rx_ppdu_end_duration {
__le32 rsvd0[9];
__le32 info0;
- __le32 rsvd1[4];
+ __le32 rsvd1[18];
} __packed;
struct hal_rx_rxpcu_classification_overview {
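With chan_num and the new freq field packed into info1 of struct
hal_rx_ppdu_start, a monitor-path consumer would read both with
le32_get_bits(); a sketch using the masks defined above:

	ppdu_info->chan_num = le32_get_bits(ppdu_start->info1,
					    HAL_RX_PPDU_START_INFO1_CHAN_NUM);
	ppdu_info->freq = le32_get_bits(ppdu_start->info1,
					HAL_RX_PPDU_START_INFO1_CHAN_FREQ);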
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h
index 7c837094a6f7..3cf5973771d7 100644
--- a/drivers/net/wireless/ath/ath12k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_TX_H
@@ -57,7 +57,7 @@ struct hal_tx_info {
struct hal_tx_status {
enum hal_wbm_rel_src_module buf_rel_source;
enum hal_wbm_tqm_rel_reason status;
- u8 ack_rssi;
+ s8 ack_rssi;
u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
u32 ppdu_id;
u8 try_cnt;
diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h
index c653ca1f59b2..e8840fab6061 100644
--- a/drivers/net/wireless/ath/ath12k/hif.h
+++ b/drivers/net/wireless/ath/ath12k/hif.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HIF_H
@@ -17,7 +17,7 @@ struct ath12k_hif_ops {
int (*start)(struct ath12k_base *ab);
void (*stop)(struct ath12k_base *ab);
int (*power_up)(struct ath12k_base *ab);
- void (*power_down)(struct ath12k_base *ab);
+ void (*power_down)(struct ath12k_base *ab, bool is_suspend);
int (*suspend)(struct ath12k_base *ab);
int (*resume)(struct ath12k_base *ab);
int (*map_service_to_pipe)(struct ath12k_base *ab, u16 service_id,
@@ -30,6 +30,8 @@ struct ath12k_hif_ops {
void (*ce_irq_enable)(struct ath12k_base *ab);
void (*ce_irq_disable)(struct ath12k_base *ab);
void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx);
+ int (*panic_handler)(struct ath12k_base *ab);
+ void (*coredump_download)(struct ath12k_base *ab);
};
static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
@@ -133,12 +135,31 @@ static inline void ath12k_hif_write32(struct ath12k_base *ab, u32 address,
static inline int ath12k_hif_power_up(struct ath12k_base *ab)
{
+ if (!ab->hif.ops->power_up)
+ return -EOPNOTSUPP;
+
return ab->hif.ops->power_up(ab);
}
-static inline void ath12k_hif_power_down(struct ath12k_base *ab)
+static inline void ath12k_hif_power_down(struct ath12k_base *ab, bool is_suspend)
+{
+ if (!ab->hif.ops->power_down)
+ return;
+
+ ab->hif.ops->power_down(ab, is_suspend);
+}
+
+static inline int ath12k_hif_panic_handler(struct ath12k_base *ab)
{
- ab->hif.ops->power_down(ab);
+ if (!ab->hif.ops->panic_handler)
+ return NOTIFY_DONE;
+
+ return ab->hif.ops->panic_handler(ab);
}
+static inline void ath12k_hif_coredump_download(struct ath12k_base *ab)
+{
+ if (ab->hif.ops->coredump_download)
+ ab->hif.ops->coredump_download(ab);
+}
#endif /* ATH12K_HIF_H */
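Because the wrappers above now tolerate NULL ops, a bus backend only has to
provide the callbacks it supports; a hypothetical ops table for
illustration (none of these example_* symbols exist in the patch):

	static const struct ath12k_hif_ops example_bus_ops = {
		.power_up = example_power_up,
		.power_down = example_power_down,	/* now takes is_suspend */
		/* .panic_handler and .coredump_download left unset:
		 * ath12k_hif_panic_handler() falls back to NOTIFY_DONE and
		 * ath12k_hif_coredump_download() becomes a no-op.
		 */
	};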
diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c
index 23f7428abd95..d13616bf07f4 100644
--- a/drivers/net/wireless/ath/ath12k/htc.c
+++ b/drivers/net/wireless/ath/ath12k/htc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -244,6 +244,11 @@ static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack)
complete(&ab->htc_suspend);
}
+static void ath12k_htc_wakeup_from_suspend(struct ath12k_base *ab)
+{
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot wakeup from suspend received\n");
+}
+
void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -349,6 +354,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
ath12k_htc_suspend_complete(ab, false);
break;
case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
+ ath12k_htc_wakeup_from_suspend(ab);
break;
default:
ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n",
@@ -358,7 +364,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
goto out;
}
- ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %p\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ab, skb);
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index 0b17dfd47856..b7b583fadb5a 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -15,6 +15,10 @@
#include "mhi.h"
#include "dp_rx.h"
+static const guid_t wcn7850_uuid = GUID_INIT(0xf634f534, 0x6147, 0x11ec,
+ 0x90, 0xd6, 0x02, 0x42,
+ 0xac, 0x12, 0x00, 0x03);
+
static u8 ath12k_hw_qcn9274_mac_from_pdev_id(int pdev_idx)
{
return pdev_idx;
@@ -540,9 +544,6 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
},
.rx_mon_dest = {
0, 0, 0,
- ATH12K_RX_MON_RING_MASK_0,
- ATH12K_RX_MON_RING_MASK_1,
- ATH12K_RX_MON_RING_MASK_2,
},
.rx = {
0, 0, 0, 0,
@@ -568,16 +569,15 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
ATH12K_HOST2RXDMA_RING_MASK_0,
},
.tx_mon_dest = {
- ATH12K_TX_MON_RING_MASK_0,
- ATH12K_TX_MON_RING_MASK_1,
+ 0, 0, 0,
},
};
static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
.tx = {
ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
ATH12K_TX_RING_MASK_2,
- ATH12K_TX_RING_MASK_4,
},
.rx_mon_dest = {
},
@@ -880,14 +880,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT),
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_AP_VLAN),
.supports_monitor = false,
.idle_ps = false,
@@ -912,7 +913,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_cfg = 0,
.rfkill_on_level = 0,
- .rddm_size = 0,
+ .rddm_size = 0x600000,
.def_num_link = 0,
.max_mlo_peer = 256,
@@ -920,6 +921,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
.supports_sta_ps = false,
+
+ .acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = true,
+
+ .iova_mask = 0,
+
+ .supports_aspm = false,
},
{
.name = "wcn7850 hw2.0",
@@ -950,7 +958,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_wcn7850,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.num_rxdma_dst_ring = 1,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
@@ -964,7 +972,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.idle_ps = true,
.download_calib = false,
- .supports_suspend = false,
+ .supports_suspend = true,
.tcl_ring_retry = false,
.reoq_lut_support = false,
.supports_shadow_regs = true,
@@ -993,6 +1001,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.otp_board_id_register = 0,
.supports_sta_ps = true,
+
+ .acpi_guid = &wcn7850_uuid,
+ .supports_dynamic_smps_6ghz = false,
+
+ .iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
+
+ .supports_aspm = true,
},
{
.name = "qcn9274 hw2.0",
@@ -1021,14 +1036,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT),
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_AP_VLAN),
.supports_monitor = false,
.idle_ps = false,
@@ -1053,7 +1069,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_cfg = 0,
.rfkill_on_level = 0,
- .rddm_size = 0,
+ .rddm_size = 0x600000,
.def_num_link = 0,
.max_mlo_peer = 256,
@@ -1061,6 +1077,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
.supports_sta_ps = false,
+
+ .acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = true,
+
+ .iova_mask = 0,
+
+ .supports_aspm = false,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 87965980b938..8d52182e28ae 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -8,6 +8,7 @@
#define ATH12K_HW_H
#include <linux/mhi.h>
+#include <linux/uuid.h>
#include "wmi.h"
#include "hal.h"
@@ -77,9 +78,9 @@
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
-#define TARGET_RX_PEER_METADATA_VER_V1A 2
-#define TARGET_RX_PEER_METADATA_VER_V1B 3
+#define TARGET_EMA_MAX_PROFILE_PERIOD 8
+#define ATH12K_HW_DEFAULT_QUEUE 0
#define ATH12K_HW_MAX_QUEUES 4
#define ATH12K_QUEUE_LEN 4096
@@ -95,6 +96,8 @@
#define ATH12K_M3_FILE "m3.bin"
#define ATH12K_REGDB_FILE_NAME "regdb.bin"
+#define ATH12K_PCIE_MAX_PAYLOAD_SIZE 128
+
enum ath12k_hw_rate_cck {
ATH12K_HW_RATE_CCK_LP_11M = 0,
ATH12K_HW_RATE_CCK_LP_5_5M,
@@ -172,7 +175,7 @@ struct ath12k_hw_params {
const struct ath12k_hw_hal_params *hal_params;
bool rxdma1_enable:1;
- int num_rxmda_per_pdev;
+ int num_rxdma_per_pdev;
int num_rxdma_dst_ring;
bool rx_mac_buf_ring:1;
bool vdev_start_delay:1;
@@ -186,6 +189,7 @@ struct ath12k_hw_params {
bool tcl_ring_retry:1;
bool reoq_lut_support:1;
bool supports_shadow_regs:1;
+ bool supports_aspm:1;
u32 num_tcl_banks;
u32 max_tx_ring;
@@ -211,6 +215,11 @@ struct ath12k_hw_params {
u32 otp_board_id_register;
bool supports_sta_ps;
+
+ const guid_t *acpi_guid;
+ bool supports_dynamic_smps_6ghz;
+
+ u32 iova_mask;
};
struct ath12k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 52a5fb8b03e9..2d062b5904a8 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
+#include <net/cfg80211.h>
#include <linux/etherdevice.h>
+
#include "mac.h"
#include "core.h"
#include "debug.h"
@@ -14,6 +16,9 @@
#include "dp_tx.h"
#include "dp_rx.h"
#include "peer.h"
+#include "debugfs.h"
+#include "hif.h"
+#include "wow.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -90,6 +95,10 @@ static const struct ieee80211_channel ath12k_5ghz_channels[] = {
};
static const struct ieee80211_channel ath12k_6ghz_channels[] = {
+ /* Operating Class 136 */
+ CHAN6G(2, 5935, 0),
+
+ /* Operating Classes 131-135 */
CHAN6G(1, 5955, 0),
CHAN6G(5, 5975, 0),
CHAN6G(9, 5995, 0),
@@ -242,7 +251,10 @@ static const u32 ath12k_smps_map[] = {
};
static int ath12k_start_vdev_delay(struct ath12k *ar,
- struct ath12k_vif *arvif);
+ struct ath12k_link_vif *arvif);
+static void ath12k_mac_stop(struct ath12k *ar);
+static int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif);
+static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ath12k_link_vif *arvif);
static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
{
@@ -465,24 +477,66 @@ static u8 ath12k_parse_mpdudensity(u8 mpdudensity)
}
}
-static int ath12k_mac_vif_chan(struct ieee80211_vif *vif,
- struct cfg80211_chan_def *def)
+static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id,
+ struct cfg80211_chan_def *def)
{
+ struct ieee80211_bss_conf *link_conf;
struct ieee80211_chanctx_conf *conf;
rcu_read_lock();
- conf = rcu_dereference(vif->bss_conf.chanctx_conf);
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+
+ if (!link_conf) {
+ rcu_read_unlock();
+ return -ENOLINK;
+ }
+
+ conf = rcu_dereference(link_conf->chanctx_conf);
if (!conf) {
rcu_read_unlock();
return -ENOENT;
}
-
*def = conf->def;
rcu_read_unlock();
return 0;
}
+static struct ieee80211_bss_conf *
+ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif)
+{
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ link_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ vif->link_conf[arvif->link_id]);
+
+ return link_conf;
+}
+
+static struct ieee80211_link_sta *ath12k_mac_get_link_sta(struct ath12k_link_sta *arsta)
+{
+ struct ath12k_sta *ahsta = arsta->ahsta;
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ieee80211_link_sta *link_sta;
+
+ lockdep_assert_wiphy(ahsta->ahvif->ah->hw->wiphy);
+
+ if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ link_sta = wiphy_dereference(ahsta->ahvif->ah->hw->wiphy,
+ sta->link[arsta->link_id]);
+
+ return link_sta;
+}
+
static bool ath12k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
@@ -528,18 +582,35 @@ static void ath12k_get_arvif_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath12k_vif_iter *arvif_iter = data;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ unsigned long links_map = ahvif->links_map;
+ struct ath12k_link_vif *arvif;
+ u8 link_id;
- if (arvif->vdev_id == arvif_iter->vdev_id)
- arvif_iter->arvif = arvif;
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = rcu_dereference(ahvif->link[link_id]);
+
+ if (WARN_ON(!arvif))
+ continue;
+
+ if (arvif->vdev_id == arvif_iter->vdev_id &&
+ arvif->ar == arvif_iter->ar) {
+ arvif_iter->arvif = arvif;
+ break;
+ }
+ }
}
-struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
+struct ath12k_link_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
{
struct ath12k_vif_iter arvif_iter = {};
u32 flags;
+	/* The caller must hold the RCU read lock to use the returned arvif. */
+ WARN_ON(!rcu_read_lock_any_held());
arvif_iter.vdev_id = vdev_id;
+ arvif_iter.ar = ar;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;
ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
@@ -554,12 +625,12 @@ struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
return arvif_iter.arvif;
}
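
A minimal sketch (not part of this patch; the caller name is hypothetical) of the
locking contract that the WARN_ON above enforces - the RCU read lock must span both
the lookup and every use of the returned arvif:

	static void example_find_and_log(struct ath12k *ar, u32 vdev_id)
	{
		struct ath12k_link_vif *arvif;

		rcu_read_lock();
		arvif = ath12k_mac_get_arvif(ar, vdev_id);
		if (arvif)
			ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
				   "found arvif for vdev %d\n", arvif->vdev_id);
		rcu_read_unlock();
	}
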
-struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
- u32 vdev_id)
+struct ath12k_link_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
+ u32 vdev_id)
{
int i;
struct ath12k_pdev *pdev;
- struct ath12k_vif *arvif;
+ struct ath12k_link_vif *arvif;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
@@ -613,6 +684,161 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
return NULL;
}
+static bool ath12k_mac_is_ml_arvif(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+
+ lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+ if (ahvif->vif->valid_links & BIT(arvif->link_id))
+ return true;
+
+ return false;
+}
+
+static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
+ struct ieee80211_channel *channel)
+{
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar;
+ int i;
+
+ ar = ah->radio;
+
+ if (ah->num_radio == 1)
+ return ar;
+
+ for_each_ar(ah, ar, i) {
+ if (channel->center_freq >= KHZ_TO_MHZ(ar->freq_range.start_freq) &&
+ channel->center_freq <= KHZ_TO_MHZ(ar->freq_range.end_freq))
+ return ar;
+ }
+ return NULL;
+}
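
When one ath12k_hw spans several radios, the owning radio is found purely by
frequency range; KHZ_TO_MHZ() converts the kHz bounds that cfg80211 stores. A
standalone sketch of the same range test (ranges illustrative):

	#include <stdio.h>

	#define KHZ_TO_MHZ(f) ((f) / 1000)

	struct radio_range {
		unsigned int start_khz;
		unsigned int end_khz;
	};

	int main(void)
	{
		/* illustrative split: one 5 GHz radio, one 6 GHz radio */
		struct radio_range radios[] = {
			{ 5150000, 5895000 },
			{ 5925000, 7125000 },
		};
		unsigned int center_freq = 5975;	/* 6 GHz channel 5 */
		unsigned int i;

		for (i = 0; i < 2; i++) {
			if (center_freq >= KHZ_TO_MHZ(radios[i].start_khz) &&
			    center_freq <= KHZ_TO_MHZ(radios[i].end_khz)) {
				printf("channel served by radio %u\n", i);
				break;
			}
		}
		return 0;
	}
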
+
+static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ if (!ctx)
+ return NULL;
+
+ return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan);
+}
+
+static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 link_id)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+	/* If there is only one pdev within this ah, return ar directly. */
+ if (ah->num_radio == 1)
+ return ah->radio;
+
+ if (!(ahvif->links_map & BIT(link_id)))
+ return NULL;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (arvif && arvif->is_created)
+ return arvif->ar;
+
+ return NULL;
+}
+
+void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct ath12k_mac_get_any_chanctx_conf_arg *arg = data;
+ struct ath12k *ctx_ar = ath12k_get_ar_by_ctx(hw, conf);
+
+ if (ctx_ar == arg->ar)
+ arg->chanctx_conf = conf;
+}
+
+static struct ath12k_link_vif *ath12k_mac_get_vif_up(struct ath12k *ar)
+{
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up)
+ return arvif;
+ }
+
+ return NULL;
+}
+
+static bool ath12k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2)
+{
+ switch (band1) {
+ case NL80211_BAND_2GHZ:
+ if (band2 & WMI_HOST_WLAN_2G_CAP)
+ return true;
+ break;
+ case NL80211_BAND_5GHZ:
+ case NL80211_BAND_6GHZ:
+ if (band2 & WMI_HOST_WLAN_5G_CAP)
+ return true;
+ break;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static u8 ath12k_mac_get_target_pdev_id_from_vif(struct ath12k_link_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 pdev_id = ab->fw_pdev[0].pdev_id;
+ int i;
+
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
+ return pdev_id;
+
+ band = def.chan->band;
+
+ for (i = 0; i < ab->fw_pdev_count; i++) {
+ if (ath12k_mac_band_match(band, ab->fw_pdev[i].supported_bands))
+ return ab->fw_pdev[i].pdev_id;
+ }
+
+ return pdev_id;
+}
+
+u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar)
+{
+ struct ath12k_link_vif *arvif;
+ struct ath12k_base *ab = ar->ab;
+
+ if (!ab->hw_params->single_pdev_only)
+ return ar->pdev->pdev_id;
+
+ arvif = ath12k_mac_get_vif_up(ar);
+
+	/* The fw_pdev array holds pdev IDs derived from the PHY capability
+	 * service ready event (pdev_and_hw_link_ids).
+	 * If no vif is active, return the first entry by default.
+	 */
+ if (!arvif)
+ return ar->ab->fw_pdev[0].pdev_id;
+
+	/* If an active vif is found, return the pdev ID matching the chandef band */
+ return ath12k_mac_get_target_pdev_id_from_vif(arvif);
+}
+
static void ath12k_pdev_caps_update(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -634,11 +860,11 @@ static void ath12k_pdev_caps_update(struct ath12k *ar)
static int ath12k_mac_txpower_recalc(struct ath12k *ar)
{
struct ath12k_pdev *pdev = ar->pdev;
- struct ath12k_vif *arvif;
+ struct ath12k_link_vif *arvif;
int ret, txpower = -1;
u32 param;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->txpower <= 0)
@@ -688,13 +914,13 @@ fail:
return ret;
}
-static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif)
+static int ath12k_recalc_rtscts_prot(struct ath12k_link_vif *arvif)
{
struct ath12k *ar = arvif->ar;
u32 vdev_param, rts_cts;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;
@@ -727,7 +953,7 @@ static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif)
return ret;
}
-static int ath12k_mac_set_kickout(struct ath12k_vif *arvif)
+static int ath12k_mac_set_kickout(struct ath12k_link_vif *arvif)
{
struct ath12k *ar = arvif->ar;
u32 param;
@@ -777,11 +1003,14 @@ void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
struct ath12k_peer *peer, *tmp;
struct ath12k_base *ab = ar->ab;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
- ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+ /* Skip Rx TID cleanup for self peer */
+ if (peer->sta)
+ ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+
list_del(&peer->list);
kfree(peer);
}
@@ -793,7 +1022,7 @@ void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
{
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
@@ -810,9 +1039,12 @@ static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id)
{
+ struct ath12k_wmi_vdev_up_params params = {};
int ret;
- ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ params.vdev_id = vdev_id;
+ params.bssid = ar->mac_addr;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
@@ -829,9 +1061,10 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
{
struct ieee80211_channel *channel;
struct wmi_vdev_start_req_arg arg = {};
+ struct ath12k_wmi_vdev_up_params params = {};
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
channel = chandef->chan;
arg.vdev_id = vdev_id;
@@ -869,7 +1102,9 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
return ret;
}
- ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ params.vdev_id = vdev_id;
+ params.bssid = ar->mac_addr;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
@@ -892,7 +1127,7 @@ static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar)
{
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
reinit_completion(&ar->vdev_setup_done);
@@ -922,9 +1157,8 @@ static int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
struct ath12k_wmi_vdev_create_arg arg = {};
int bit, ret;
u8 tmp_addr[6];
- u16 nss;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (ar->monitor_vdev_created)
return 0;
@@ -962,19 +1196,6 @@ static int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
return ret;
}
- nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
- ret = ath12k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
- WMI_VDEV_PARAM_NSS, nss);
- if (ret) {
- ath12k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
- ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
- return ret;
- }
-
- ret = ath12k_mac_txpower_recalc(ar);
- if (ret)
- return ret;
-
ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
ar->num_created_vdevs++;
@@ -990,7 +1211,7 @@ static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
int ret;
unsigned long time_left;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (!ar->monitor_vdev_created)
return 0;
@@ -1036,7 +1257,7 @@ static int ath12k_mac_monitor_start(struct ath12k *ar)
struct cfg80211_chan_def *chandef = NULL;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (ar->monitor_started)
return 0;
@@ -1066,7 +1287,7 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar)
{
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (!ar->monitor_started)
return 0;
@@ -1084,12 +1305,13 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar)
return ret;
}
-static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
+int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
{
+ struct ath12k_vif *ahvif = arvif->ahvif;
struct ath12k *ar = arvif->ar;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
reinit_completion(&ar->vdev_setup_done);
@@ -1111,10 +1333,10 @@ static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
ar->num_started_vdevs--;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
- arvif->vif->addr, arvif->vdev_id);
+ ahvif->vif->addr, arvif->vdev_id);
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
arvif->vdev_id);
}
@@ -1130,36 +1352,33 @@ static int ath12k_mac_config(struct ath12k *ar, u32 changed)
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_wiphy(hw->wiphy);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR;
if (ar->monitor_conf_enabled) {
if (ar->monitor_vdev_created)
- goto exit;
+ return ret;
ret = ath12k_mac_monitor_vdev_create(ar);
if (ret)
- goto exit;
+ return ret;
ret = ath12k_mac_monitor_start(ar);
if (ret)
goto err_mon_del;
} else {
if (!ar->monitor_vdev_created)
- goto exit;
+ return ret;
ret = ath12k_mac_monitor_stop(ar);
if (ret)
- goto exit;
+ return ret;
ath12k_mac_monitor_vdev_delete(ar);
}
}
-exit:
- mutex_unlock(&ar->conf_mutex);
return ret;
err_mon_del:
ath12k_mac_monitor_vdev_delete(ar);
- mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -1169,7 +1388,9 @@ static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
struct ath12k *ar;
int ret;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_ah_to_ar(ah, 0);
ret = ath12k_mac_config(ar, changed);
if (ret)
@@ -1179,7 +1400,7 @@ static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
return ret;
}
-static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_vif *arvif,
+static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_link_vif *arvif,
struct sk_buff *bcn)
{
struct ath12k *ar = arvif->ar;
@@ -1236,39 +1457,213 @@ static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
return 0;
}
-static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
+static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_buff *bcn,
+ u8 bssid_index, bool *nontx_profile_found)
{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)bcn->data;
+ const struct element *elem, *nontx, *index, *nie;
+ const u8 *start, *tail;
+ u16 rem_len;
+ u8 i;
+
+ start = bcn->data + ieee80211_get_hdrlen_from_skb(bcn) + sizeof(mgmt->u.beacon);
+ tail = skb_tail_pointer(bcn);
+ rem_len = tail - start;
+
+ arvif->rsnie_present = false;
+ arvif->wpaie_present = false;
+
+ if (cfg80211_find_ie(WLAN_EID_RSN, start, rem_len))
+ arvif->rsnie_present = true;
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA,
+ start, rem_len))
+ arvif->wpaie_present = true;
+
+ /* Return from here for the transmitted profile */
+ if (!bssid_index)
+ return;
+
+	/* Initial rsnie_present for the nontransmitted profile is set to be the
+	 * same as that of the transmitted profile. It is changed if the security
+	 * configurations differ.
+	 */
+ *nontx_profile_found = false;
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, rem_len) {
+		/* The fixed minimum MBSSID element length with at least one
+		 * nontransmitted BSSID profile is 12 bytes, as given below:
+		 * 1 (max BSSID indicator) +
+		 * 2 (Nontransmitted BSSID profile: subelement ID + length) +
+		 * 4 (Nontransmitted BSSID Capabilities: tag + length + info) +
+		 * 2 (Nontransmitted BSSID SSID: tag + length) +
+		 * 3 (Nontransmitted BSSID Index: tag + length + BSSID index)
+		 */
+ if (elem->datalen < 12 || elem->data[0] < 1)
+ continue; /* Max BSSID indicator must be >=1 */
+
+ for_each_element(nontx, elem->data + 1, elem->datalen - 1) {
+ start = nontx->data;
+
+ if (nontx->id != 0 || nontx->datalen < 4)
+ continue; /* Invalid nontransmitted profile */
+
+ if (nontx->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ nontx->data[1] != 2) {
+ continue; /* Missing nontransmitted BSS capabilities */
+ }
+
+ if (nontx->data[4] != WLAN_EID_SSID)
+ continue; /* Missing SSID for nontransmitted BSS */
+
+ index = cfg80211_find_elem(WLAN_EID_MULTI_BSSID_IDX,
+ start, nontx->datalen);
+ if (!index || index->datalen < 1 || index->data[0] == 0)
+ continue; /* Invalid MBSSID Index element */
+
+ if (index->data[0] == bssid_index) {
+ *nontx_profile_found = true;
+ if (cfg80211_find_ie(WLAN_EID_RSN,
+ nontx->data,
+ nontx->datalen)) {
+ arvif->rsnie_present = true;
+ return;
+ } else if (!arvif->rsnie_present) {
+ return; /* Both tx and nontx BSS are open */
+ }
+
+ nie = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ nontx->data,
+ nontx->datalen);
+ if (!nie || nie->datalen < 2)
+ return; /* Invalid non-inheritance element */
+
+ for (i = 1; i < nie->datalen - 1; i++) {
+ if (nie->data[i] == WLAN_EID_RSN) {
+ arvif->rsnie_present = false;
+ break;
+ }
+ }
+
+ return;
+ }
+ }
+ }
+}
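
For reference, the shortest Multiple BSSID element body counted by the 12-byte
minimum above can be sketched as a byte array; the values are illustrative only
(tag numbers per IEEE 802.11: 83 = nontransmitted BSSID capability, 85 = multiple
BSSID-index):

	/* illustrative bytes only - element body after the ID/length octets */
	static const unsigned char mbssid_min_body[] = {
		1,			/* Max BSSID Indicator: 2^1 BSSIDs */
		0, 9,			/* subelement ID 0 (profile) + length */
		83, 2, 0x00, 0x01,	/* Nontx BSSID Capability: tag + len + info */
		0, 0,			/* SSID: tag + len (zero-length, minimum case) */
		85, 1, 1,		/* Multiple BSSID-Index: tag + len + index 1 */
	};
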
+
+static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
+ struct ieee80211_ema_beacons *beacons;
+ struct ath12k_link_vif *tx_arvif;
+ bool nontx_profile_found = false;
+ struct ath12k_vif *tx_ahvif;
+ int ret = 0;
+ u8 i;
+
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(arvif->ar->ab,
+ "failed to get link bss conf to update bcn tmpl for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
+ tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
+ tx_arvif = &tx_ahvif->deflink;
+ beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
+ tx_ahvif->vif,
+ tx_arvif->link_id);
+ if (!beacons || !beacons->cnt) {
+ ath12k_warn(arvif->ar->ab,
+ "failed to get ema beacon templates from mac80211\n");
+ return -EPERM;
+ }
+
+ if (tx_arvif == arvif)
+ ath12k_mac_set_arvif_ies(arvif, beacons->bcn[0].skb, 0, NULL);
+
+ for (i = 0; i < beacons->cnt; i++) {
+ if (tx_arvif != arvif && !nontx_profile_found)
+ ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb,
+ bss_conf->bssid_index,
+ &nontx_profile_found);
+
+ ema_args.bcn_cnt = beacons->cnt;
+ ema_args.bcn_index = i;
+ ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
+ &beacons->bcn[i].offs,
+ beacons->bcn[i].skb, &ema_args);
+ if (ret) {
+ ath12k_warn(tx_arvif->ar->ab,
+ "failed to set ema beacon template id %i error %d\n",
+ i, ret);
+ break;
+ }
+ }
+
+ if (tx_arvif != arvif && !nontx_profile_found)
+ ath12k_warn(arvif->ar->ab,
+ "nontransmitted bssid index %u not found in beacon template\n",
+ bss_conf->bssid_index);
+
+ ieee80211_beacon_free_ema_list(beacons);
+ return ret;
+}
+
+static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k_link_vif *tx_arvif = arvif;
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
- struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
+ struct ath12k_vif *tx_ahvif = ahvif;
+ bool nontx_profile_found = false;
struct sk_buff *bcn;
- struct ieee80211_mgmt *mgmt;
- u8 *ies;
int ret;
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ if (ahvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
- bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf to set bcn tmpl for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
+ if (vif->mbssid_tx_vif) {
+ tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
+ tx_arvif = &tx_ahvif->deflink;
+ if (tx_arvif != arvif && arvif->is_up)
+ return 0;
+
+ if (link_conf->ema_ap)
+ return ath12k_mac_setup_bcn_tmpl_ema(arvif);
+ }
+
+ bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif,
+ &offs, tx_arvif->link_id);
if (!bcn) {
ath12k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
}
- ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
- ies += sizeof(mgmt->u.beacon);
-
- if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
- arvif->rsnie_present = true;
-
- if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies, (skb_tail_pointer(bcn) - ies)))
- arvif->wpaie_present = true;
+ if (tx_arvif == arvif) {
+ ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL);
+ } else {
+ ath12k_mac_set_arvif_ies(arvif, bcn,
+ link_conf->bssid_index,
+ &nontx_profile_found);
+ if (!nontx_profile_found)
+ ath12k_warn(ab,
+ "nontransmitted profile not found in beacon template\n");
+ }
- if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) {
+ if (ahvif->vif->type == NL80211_IFTYPE_AP && ahvif->vif->p2p) {
ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn);
if (ret) {
ath12k_warn(ab, "failed to setup P2P GO bcn ie: %d\n",
@@ -1291,7 +1686,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
}
}
- ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+ ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
@@ -1302,13 +1697,15 @@ free_bcn_skb:
return ret;
}
-static void ath12k_control_beaconing(struct ath12k_vif *arvif,
+static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *info)
{
+ struct ath12k_wmi_vdev_up_params params = {};
+ struct ath12k_vif *ahvif = arvif->ahvif;
struct ath12k *ar = arvif->ar;
int ret;
- lockdep_assert_held(&arvif->ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(arvif->ar)->wiphy);
if (!info->enable_beacon) {
ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
@@ -1328,12 +1725,23 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif,
return;
}
- arvif->aid = 0;
+ ahvif->aid = 0;
- ether_addr_copy(arvif->bssid, info->bssid);
+ ether_addr_copy(arvif->bssid, info->addr);
- ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
- arvif->bssid);
+ params.vdev_id = arvif->vdev_id;
+ params.aid = ahvif->aid;
+ params.bssid = arvif->bssid;
+ if (ahvif->vif->mbssid_tx_vif) {
+ struct ath12k_vif *tx_ahvif =
+ ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
+ struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
+
+ params.tx_bssid = tx_arvif->bssid;
+ params.nontx_profile_idx = info->bssid_index;
+ params.nontx_profile_cnt = 1 << info->bssid_indicator;
+ }
+ ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
@@ -1345,48 +1753,137 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif,
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
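
In the MBSSID vdev-up parameters above, bssid_indicator is the max-BSSID exponent
n advertised in the beacon, so the profile count passed to firmware is 2^n. A
standalone check of that arithmetic (value illustrative):

	#include <assert.h>

	int main(void)
	{
		unsigned int bssid_indicator = 3;	/* AP advertises 2^3 BSSIDs */
		unsigned int nontx_profile_cnt = 1U << bssid_indicator;

		assert(nontx_profile_cnt == 8);
		return 0;
	}
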
+static void ath12k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif = &ahvif->deflink;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif = &ahvif->deflink;
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+	/* Firmware doesn't report beacon loss events repeatedly. If the AP probe
+	 * (done by mac80211) succeeds but beacons do not resume, it doesn't make
+	 * sense to continue operation. Queue the connection-loss work, which is
+	 * cancelled when a beacon is received.
+	 */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH12K_CONNECTION_LOSS_HZ);
+}
+
+void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
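
The three functions above implement the usual delayed-work pattern: arm on beacon
miss, disarm on beacon reception or disassociation. A minimal sketch of the wiring
(the initialization site is elsewhere in this patch, and the wrapper function here
is hypothetical):

	static void example_wire_connection_loss(struct ath12k_link_vif *arvif,
						 struct ieee80211_hw *hw)
	{
		/* at link setup time */
		INIT_DELAYED_WORK(&arvif->connection_loss_work,
				  ath12k_mac_vif_sta_connection_loss_work);

		/* on beacon miss: arm; queueing while already pending is a no-op */
		ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
					     ATH12K_CONNECTION_LOSS_HZ);

		/* on beacon rx or disassociation: disarm */
		cancel_delayed_work(&arvif->connection_loss_work);
	}
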
+
static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ieee80211_bss_conf *bss_conf;
u32 aid;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(hw->wiphy);
if (vif->type == NL80211_IFTYPE_STATION)
aid = vif->cfg.aid;
else
aid = sta->aid;
- ether_addr_copy(arg->peer_mac, sta->addr);
+ ether_addr_copy(arg->peer_mac, arsta->addr);
arg->vdev_id = arvif->vdev_id;
arg->peer_associd = aid;
arg->auth_flag = true;
/* TODO: STA WAR in ath10k for listen interval required? */
arg->peer_listen_intval = hw->conf.listen_interval;
arg->peer_nss = 1;
- arg->peer_caps = vif->bss_conf.assoc_capability;
+
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ arg->peer_caps = bss_conf->assoc_capability;
}
static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
- struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ieee80211_bss_conf *info;
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(hw->wiphy);
- if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf for peer assoc crypto for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0,
@@ -1434,12 +1931,14 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
}
static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
@@ -1449,14 +1948,21 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
u8 rate;
int i;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
+ return;
- if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc rates for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
return;
+ }
band = def.chan->band;
sband = hw->wiphy->bands[band];
- ratemask = sta->deflink.supp_rates[band];
+ ratemask = link_sta->supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -1497,12 +2003,14 @@ ath12k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask)
}
static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ const struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -1510,11 +2018,19 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
u8 max_nss;
u32 stbc;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc ht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ ht_cap = &link_sta->ht_cap;
if (!ht_cap->ht_supported)
return;
@@ -1538,7 +2054,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
arg->ldpc_flag = true;
- if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+ if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
arg->bw_40 = true;
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
@@ -1588,7 +2104,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -1658,12 +2174,14 @@ ath12k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
}
static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ const struct ieee80211_sta_vht_cap *vht_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u16 *vht_mcs_mask;
@@ -1672,9 +2190,19 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
u8 max_nss, vht_mcs;
int i;
- if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc vht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ vht_cap = &link_sta->vht_cap;
if (!vht_cap->vht_supported)
return;
@@ -1707,10 +2235,10 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
/* Calculate peer NSS capability from VHT capabilities if STA
@@ -1724,7 +2252,7 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
vht_mcs_mask[i])
max_nss = i + 1;
}
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
@@ -1747,23 +2275,44 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
arg->tx_max_mcs_nss = 0xFF;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
- sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ arsta->addr, arg->peer_max_mpdu, arg->peer_flags);
/* TODO: rxnss_override */
}
static void ath12k_peer_assoc_h_he(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
int i;
- u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
+ u8 ampdu_factor, max_nss;
+ u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
u16 mcs_160_map, mcs_80_map;
bool support_160;
u16 v;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc he for vif %pM link %u",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ he_cap = &link_sta->he_cap;
if (!he_cap->has_he)
return;
@@ -1801,13 +2350,13 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
else
max_nss = rx_mcs_80;
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
sizeof(he_cap->he_cap_elem.mac_cap_info));
memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
sizeof(he_cap->he_cap_elem.phy_cap_info));
- arg->peer_he_ops = vif->bss_conf.he_oper.params;
+ arg->peer_he_ops = link_conf->he_oper.params;
/* the top most byte is used to indicate BSS color info */
arg->peer_he_ops &= 0xffffff;
@@ -1824,15 +2373,14 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
* request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu
* length.
*/
- ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] &
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >>
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK;
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
- if (sta->deflink.vht_cap.vht_supported)
+ if (link_sta->vht_cap.vht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
- else if (sta->deflink.ht_cap.ht_supported)
+ else if (link_sta->ht_cap.ht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
}
@@ -1873,7 +2421,7 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
arg->twt_requester = true;
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
@@ -1906,17 +2454,114 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
}
}
-static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
+static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_link_sta *link_sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 ampdu_factor, mpdu_density;
+
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
+ return;
+
+ band = def.chan->band;
+
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he 6ghz for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ he_cap = &link_sta->he_cap;
+
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !link_sta->he_6ghz_capa.capa)
+ return;
+
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ arg->bw_40 = true;
+
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
+ arg->bw_320 = true;
+
+ arg->peer_he_caps_6ghz = le16_to_cpu(link_sta->he_6ghz_capa.capa);
+
+ mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ arg->peer_mpdu_density = ath12k_parse_mpdudensity(mpdu_density);
+
+ /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
+ * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
+ * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
+ * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
+ * Band Capabilities element in the 6 GHz band.
+ *
+	 * Here, we extract the Maximum A-MPDU Length Exponent Extension from the
+	 * HE capabilities, and the factor is the Maximum A-MPDU Length Exponent
+	 * from the HE 6 GHz Band Capabilities element.
+ */
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) +
+ u32_get_bits(arg->peer_he_caps_6ghz,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+
+ arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+}
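
The final line applies the standard maximum A-MPDU length formula,
2^(base factor + extension) - 1 bytes. A standalone worked example with the 6 GHz
base factor of 13 (the value of IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR in mac80211)
and an illustrative extension of 3:

	#include <stdio.h>

	#define HE_6GHZ_MAX_AMPDU_FACTOR 13

	int main(void)
	{
		unsigned int ampdu_factor = 3;	/* illustrative extension value */
		unsigned int peer_max_mpdu =
			(1U << (HE_6GHZ_MAX_AMPDU_FACTOR + ampdu_factor)) - 1;

		printf("peer_max_mpdu = %u bytes\n", peer_max_mpdu);	/* 65535 */
		return 0;
	}
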
+
+static int ath12k_get_smps_from_capa(const struct ieee80211_sta_ht_cap *ht_cap,
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
+ int *smps)
+{
+ if (ht_cap->ht_supported)
+ *smps = u16_get_bits(ht_cap->cap, IEEE80211_HT_CAP_SM_PS);
+ else
+ *smps = le16_get_bits(he_6ghz_capa->capa,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+
+ if (*smps >= ARRAY_SIZE(ath12k_smps_map))
+ return -EINVAL;
+
+ return 0;
+}
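
The u16_get_bits()/le16_get_bits() helpers used above mask the value and shift it
down by the number of trailing zeros in the mask, avoiding the mask-versus-shift
mix-ups of open-coded versions. A plain-C equivalent for a nonzero compile-time
mask (an SM PS field value of 3 means "SM power save disabled"):

	#include <stdio.h>
	#include <stdint.h>

	static unsigned int get_bits16(uint16_t val, uint16_t mask)
	{
		return (val & mask) >> __builtin_ctz(mask);
	}

	int main(void)
	{
		uint16_t ht_cap = 0x000c;	/* illustrative HT capability bits */
		uint16_t sm_ps_mask = 0x000c;	/* IEEE80211_HT_CAP_SM_PS */

		printf("smps = %u\n", get_bits16(ht_cap, sm_ps_mask));	/* 3 */
		return 0;
	}
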
+
+static void ath12k_peer_assoc_h_smps(struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+ struct ath12k_link_vif *arvif = arsta->arvif;
+ const struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_link_sta *link_sta;
+ struct ath12k *ar = arvif->ar;
int smps;
- if (!ht_cap->ht_supported)
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
return;
+ }
+
+ he_6ghz_capa = &link_sta->he_6ghz_capa;
+ ht_cap = &link_sta->ht_cap;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ if (!ht_cap->ht_supported && !he_6ghz_capa->capa)
+ return;
+
+ if (ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps))
+ return;
switch (smps) {
case WLAN_HT_CAP_SM_PS_STATIC:
@@ -1934,13 +2579,13 @@ static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
}
static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- switch (arvif->vdev_type) {
+ switch (arvif->ahvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
if (sta->wme) {
/* TODO: Check WME vs QoS */
@@ -1966,19 +2611,20 @@ static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
- sta->addr, arg->qos_flag);
+ arsta->addr, arg->qos_flag);
}
static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
- struct ath12k_vif *arvif,
- struct ieee80211_sta *sta)
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k_wmi_ap_ps_arg arg;
u32 max_sp;
u32 uapsd;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
arg.vdev_id = arvif->vdev_id;
@@ -2005,26 +2651,26 @@ static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
arg.value = uapsd;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
arg.value = max_sp;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
/* TODO: revisit during testing */
arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
@@ -2036,17 +2682,17 @@ err:
return ret;
}
-static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_link_sta *sta)
{
- return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
ATH12K_MAC_FIRST_OFDM_RATE_IDX;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- switch (sta->deflink.vht_cap.cap &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (link_sta->vht_cap.cap &
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
@@ -2058,125 +2704,137 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
}
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AC_VHT80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AC_VHT40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AC_VHT20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11AX_HE160;
- else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ else if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11AX_HE80_80;
/* not sure if this is a valid case? */
return MODE_11AX_HE160;
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AX_HE80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AX_HE40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AX_HE20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
- if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
+ if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[0] &
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
return MODE_11BE_EHT320;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11BE_EHT160;
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11BE_EHT80_80;
ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n",
- sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]);
+ link_sta->he_cap.he_cap_elem.phy_cap_info[0]);
return MODE_11BE_EHT160;
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11BE_EHT80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11BE_EHT40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11BE_EHT20;
return MODE_UNKNOWN;
}
static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
- if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->deflink.eht_cap.has_eht) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->eht_cap.has_eht) {
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11BE_EHT40_2G;
else
phymode = MODE_11BE_EHT20_2G;
- } else if (sta->deflink.he_cap.has_he) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ } else if (link_sta->he_cap.has_he) {
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
- else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ else if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AX_HE40_2G;
else
phymode = MODE_11AX_HE20_2G;
- } else if (sta->deflink.vht_cap.vht_supported &&
+ } else if (link_sta->vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->deflink.ht_cap.ht_supported &&
+ } else if (link_sta->ht_cap.ht_supported &&
!ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
- } else if (ath12k_mac_sta_has_ofdm_only(sta)) {
+ } else if (ath12k_mac_sta_has_ofdm_only(link_sta)) {
phymode = MODE_11G;
} else {
phymode = MODE_11B;
@@ -2185,16 +2843,16 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check EHT first */
- if (sta->deflink.eht_cap.has_eht) {
- phymode = ath12k_mac_get_phymode_eht(ar, sta);
- } else if (sta->deflink.he_cap.has_he) {
- phymode = ath12k_mac_get_phymode_he(ar, sta);
- } else if (sta->deflink.vht_cap.vht_supported &&
+ if (link_sta->eht_cap.has_eht) {
+ phymode = ath12k_mac_get_phymode_eht(ar, link_sta);
+ } else if (link_sta->he_cap.has_he) {
+ phymode = ath12k_mac_get_phymode_he(ar, link_sta);
+ } else if (link_sta->vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- phymode = ath12k_mac_get_phymode_vht(ar, sta);
- } else if (sta->deflink.ht_cap.ht_supported &&
+ phymode = ath12k_mac_get_phymode_vht(ar, link_sta);
+ } else if (link_sta->ht_cap.ht_supported &&
!ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
@@ -2207,7 +2865,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n",
- sta->addr, ath12k_mac_phymode_str(phymode));
+ arsta->addr, ath12k_mac_phymode_str(phymode));
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
@@ -2277,18 +2935,30 @@ static void ath12k_mac_set_eht_ppe_threshold(const u8 *ppe_thres,
}
static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
- const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap;
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20;
const struct ieee80211_eht_mcs_nss_supp_bw *bw;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_link_sta *link_sta;
u32 *rx_mcs, *tx_mcs;
- if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht)
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc eht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ eht_cap = &link_sta->eht_cap;
+ he_cap = &link_sta->he_cap;
+ if (!he_cap->has_he || !eht_cap->has_eht)
return;
arg->eht_flag = true;
@@ -2307,7 +2977,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
rx_mcs = arg->peer_eht_rx_mcs_set;
tx_mcs = arg->peer_eht_tx_mcs_set;
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_320:
bw = &eht_cap->eht_mcs_nss_supp.bw._320;
ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss,
@@ -2359,47 +3029,109 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
arg->punct_bitmap = ~arvif->punct_bitmap;
}
+static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct peer_assoc_mlo_params *ml = &arg->ml;
+ struct ath12k_sta *ahsta = arsta->ahsta;
+ struct ath12k_link_sta *arsta_p;
+ struct ath12k_link_vif *arvif;
+ unsigned long links;
+ u8 link_id;
+ int i;
+
+ if (!sta->mlo || ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID)
+ return;
+
+ ml->enabled = true;
+ ml->assoc_link = arsta->is_assoc_link;
+
+ /* For now considering the primary umac based on assoc link */
+ ml->primary_umac = arsta->is_assoc_link;
+ ml->peer_id_valid = true;
+ ml->logical_link_idx_valid = true;
+
+ ether_addr_copy(ml->mld_addr, sta->addr);
+ ml->logical_link_idx = arsta->link_idx;
+ ml->ml_peer_id = ahsta->ml_peer_id;
+ ml->ieee_link_id = arsta->link_id;
+ ml->num_partner_links = 0;
+ links = ahsta->links_map;
+
+ rcu_read_lock();
+
+ i = 0;
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (i >= ATH12K_WMI_MLO_MAX_LINKS)
+ break;
+
+ arsta_p = rcu_dereference(ahsta->link[link_id]);
+ arvif = rcu_dereference(ahsta->ahvif->link[link_id]);
+
+ if (arsta_p == arsta)
+ continue;
+
+ if (!arvif->is_started)
+ continue;
+
+ ml->partner_info[i].vdev_id = arvif->vdev_id;
+ ml->partner_info[i].hw_link_id = arvif->ar->pdev->hw_link_id;
+ ml->partner_info[i].assoc_link = arsta_p->is_assoc_link;
+ ml->partner_info[i].primary_umac = arsta_p->is_assoc_link;
+ ml->partner_info[i].logical_link_idx_valid = true;
+ ml->partner_info[i].logical_link_idx = arsta_p->link_idx;
+ ml->num_partner_links++;
+
+ i++;
+ }
+
+ rcu_read_unlock();
+}
+
static void ath12k_peer_assoc_prepare(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg,
bool reassoc)
{
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
memset(arg, 0, sizeof(*arg));
reinit_completion(&ar->peer_assoc_done);
arg->peer_new_assoc = !reassoc;
- ath12k_peer_assoc_h_basic(ar, vif, sta, arg);
- ath12k_peer_assoc_h_crypto(ar, vif, sta, arg);
- ath12k_peer_assoc_h_rates(ar, vif, sta, arg);
- ath12k_peer_assoc_h_ht(ar, vif, sta, arg);
- ath12k_peer_assoc_h_vht(ar, vif, sta, arg);
- ath12k_peer_assoc_h_he(ar, vif, sta, arg);
- ath12k_peer_assoc_h_eht(ar, vif, sta, arg);
- ath12k_peer_assoc_h_qos(ar, vif, sta, arg);
- ath12k_peer_assoc_h_phymode(ar, vif, sta, arg);
- ath12k_peer_assoc_h_smps(sta, arg);
+ ath12k_peer_assoc_h_basic(ar, arvif, arsta, arg);
+ ath12k_peer_assoc_h_crypto(ar, arvif, arsta, arg);
+ ath12k_peer_assoc_h_rates(ar, arvif, arsta, arg);
+ ath12k_peer_assoc_h_ht(ar, arvif, arsta, arg);
+ ath12k_peer_assoc_h_vht(ar, arvif, arsta, arg);
+ ath12k_peer_assoc_h_he(ar, arvif, arsta, arg);
+ ath12k_peer_assoc_h_he_6ghz(ar, arvif, arsta, arg);
+ ath12k_peer_assoc_h_eht(ar, arvif, arsta, arg);
+ ath12k_peer_assoc_h_qos(ar, arvif, arsta, arg);
+ ath12k_peer_assoc_h_phymode(ar, arvif, arsta, arg);
+ ath12k_peer_assoc_h_smps(arsta, arg);
+ ath12k_peer_assoc_h_mlo(arsta, arg);
/* TODO: amsdu_disable req? */
}
-static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
+static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_link_vif *arvif,
const u8 *addr,
- const struct ieee80211_sta_ht_cap *ht_cap)
+ const struct ieee80211_sta_ht_cap *ht_cap,
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa)
{
- int smps;
+ int smps, ret = 0;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !he_6ghz_capa)
return 0;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
-
- if (smps >= ARRAY_SIZE(ath12k_smps_map))
- return -EINVAL;
+ ret = ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps);
+ if (ret < 0)
+ return ret;
return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE,
@@ -2407,36 +3139,65 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
}
static void ath12k_bss_assoc(struct ath12k *ar,
- struct ath12k_vif *arvif,
+ struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *bss_conf)
{
- struct ieee80211_vif *vif = arvif->vif;
- struct ath12k_wmi_peer_assoc_arg peer_arg;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ath12k_wmi_vdev_up_params params = {};
+ struct ieee80211_link_sta *link_sta;
+ u8 link_id = bss_conf->link_id;
+ struct ath12k_link_sta *arsta;
struct ieee80211_sta *ap_sta;
+ struct ath12k_sta *ahsta;
struct ath12k_peer *peer;
bool is_auth = false;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+ kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+ if (!peer_arg)
+ return;
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
- arvif->vdev_id, arvif->bssid, arvif->aid);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac vdev %i link id %u assoc bssid %pM aid %d\n",
+ arvif->vdev_id, link_id, arvif->bssid, ahvif->aid);
rcu_read_lock();
- ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ /* During ML connection, cfg.ap_addr has the MLD address. For
+ * non-ML connection, it has the BSSID.
+ */
+ ap_sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!ap_sta) {
ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
- bss_conf->bssid, arvif->vdev_id);
+ vif->cfg.ap_addr, arvif->vdev_id);
rcu_read_unlock();
return;
}
- ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
+ ahsta = ath12k_sta_to_ahsta(ap_sta);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (WARN_ON(!arsta)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (WARN_ON(!link_sta)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, false);
rcu_read_unlock();
- ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
@@ -2450,7 +3211,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->deflink.ht_cap);
+ &link_sta->ht_cap, &link_sta->he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -2459,10 +3220,13 @@ static void ath12k_bss_assoc(struct ath12k *ar,
WARN_ON(arvif->is_up);
- arvif->aid = vif->cfg.aid;
+ ahvif->aid = vif->cfg.aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
- ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ params.vdev_id = arvif->vdev_id;
+ params.aid = ahvif->aid;
+ params.bssid = arvif->bssid;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n",
arvif->vdev_id, ret);
@@ -2470,6 +3234,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
arvif->is_up = true;
+ arvif->rekey_data.enable_offload = false;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
@@ -2501,11 +3266,11 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
static void ath12k_bss_disassoc(struct ath12k *ar,
- struct ath12k_vif *arvif)
+ struct ath12k_link_vif *arvif)
{
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
arvif->vdev_id, arvif->bssid);
@@ -2517,7 +3282,9 @@ static void ath12k_bss_disassoc(struct ath12k *ar,
arvif->is_up = false;
- /* TODO: cancel connection_loss_work */
+ memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data));
+
+ cancel_delayed_work(&arvif->connection_loss_work);
}
static u32 ath12k_mac_get_rate_hw_value(int bitrate)
@@ -2546,22 +3313,30 @@ static u32 ath12k_mac_get_rate_hw_value(int bitrate)
}
static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
- struct ieee80211_vif *vif,
+ struct ath12k_link_vif *arvif,
struct cfg80211_chan_def *def)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const struct ieee80211_supported_band *sband;
+ struct ieee80211_bss_conf *bss_conf;
u8 basic_rate_idx;
int hw_rate_code;
u32 vdev_param;
u16 bitrate;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in mgmt rate calc for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
sband = hw->wiphy->bands[def->chan->band];
- basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ basic_rate_idx = ffs(bss_conf->basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
@@ -2583,9 +3358,19 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
}
-static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
+static int
+ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *ol[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ return 0;
+}
+
+static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *info)
{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ath12k *ar = arvif->ar;
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct sk_buff *tmpl;
@@ -2596,7 +3381,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
if (info->fils_discovery.max_interval) {
interval = info->fils_discovery.max_interval;
- tmpl = ieee80211_get_fils_discovery_tmpl(hw, arvif->vif);
+ tmpl = ieee80211_get_fils_discovery_tmpl(hw, vif);
if (tmpl)
ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
tmpl);
@@ -2604,8 +3389,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
unsol_bcast_probe_resp_enabled = 1;
interval = info->unsol_bcast_probe_resp_interval;
- tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw,
- arvif->vif);
+ tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
if (tmpl)
ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
tmpl);
@@ -2630,31 +3414,79 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
return ret;
}
-static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif)
+static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changed)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ unsigned long links = ahvif->links_map;
+ struct ieee80211_bss_conf *info;
+ struct ath12k_link_vif *arvif;
+ struct ath12k *ar;
+ u8 link_id;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (changed & BSS_CHANGED_SSID && vif->type == NL80211_IFTYPE_AP) {
+ ahvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ if (vif->cfg.ssid_len)
+ memcpy(ahvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || !arvif->ar)
+ continue;
+
+ ar = arvif->ar;
+
+ if (vif->cfg.assoc) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info)
+ continue;
+
+ ath12k_bss_assoc(ar, arvif, info);
+ } else {
+ ath12k_bss_disassoc(ar, arvif);
+ }
+ }
+ }
+}
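
The assoc/disassoc fan-out above is the recurring MLO idiom in this patch: walk the ahvif->links_map bitmap under the wiphy lock, dereference each per-link arvif, and skip links whose vdev has not been created yet. A small compilable sketch of the bitmap walk; a plain loop stands in for the kernel's for_each_set_bit(), and the 15-link bound mirrors IEEE80211_MLD_MAX_NUM_LINKS:

	#include <stdio.h>

	#define MAX_NUM_LINKS 15	/* IEEE80211_MLD_MAX_NUM_LINKS */

	struct link_vif {
		int vdev_id;
		int is_created;
	};

	/* visit every set bit in links_map, as for_each_set_bit() does */
	static void for_each_link(unsigned long links_map,
				  struct link_vif *links[MAX_NUM_LINKS])
	{
		for (unsigned int link_id = 0; link_id < MAX_NUM_LINKS; link_id++) {
			if (!(links_map & (1UL << link_id)))
				continue;
			if (!links[link_id] || !links[link_id]->is_created)
				continue;	/* vdev not created on this radio yet */
			printf("handling link %u (vdev %d)\n",
			       link_id, links[link_id]->vdev_id);
		}
	}

	int main(void)
	{
		struct link_vif l0 = { .vdev_id = 0, .is_created = 1 };
		struct link_vif l2 = { .vdev_id = 5, .is_created = 1 };
		struct link_vif *links[MAX_NUM_LINKS] = { [0] = &l0, [2] = &l2 };

		for_each_link((1UL << 0) | (1UL << 2), links);
		return 0;
	}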
+
+static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
{
struct ath12k *ar = arvif->ar;
- struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
enum wmi_sta_powersave_param param;
+ struct ieee80211_bss_conf *info;
enum wmi_sta_ps_mode psmode;
int ret;
int timeout;
bool enable_ps;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (vif->type != NL80211_IFTYPE_STATION)
return;
- enable_ps = arvif->ps;
+ enable_ps = arvif->ahvif->ps;
if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
timeout = conf->dynamic_ps_timeout;
if (timeout == 0) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
/* firmware doesn't like 0 */
- timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
}
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
@@ -2678,11 +3510,12 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif)
}
static void ath12k_mac_bss_info_changed(struct ath12k *ar,
- struct ath12k_vif *arvif,
+ struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *info,
u64 changed)
{
- struct ieee80211_vif *vif = arvif->vif;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
@@ -2696,7 +3529,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
u8 rateidx;
u32 rate;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
@@ -2752,10 +3585,10 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
if (changed & BSS_CHANGED_SSID &&
vif->type == NL80211_IFTYPE_AP) {
- arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ ahvif->u.ap.ssid_len = vif->cfg.ssid_len;
if (vif->cfg.ssid_len)
- memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
- arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ memcpy(ahvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
+ ahvif->u.ap.hidden_ssid = info->hidden_ssid;
}
if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
@@ -2764,8 +3597,8 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
ath12k_control_beaconing(arvif, info);
- if (arvif->is_up && vif->bss_conf.he_support &&
- vif->bss_conf.he_oper.params) {
+ if (arvif->is_up && info->he_support &&
+ info->he_oper.params) {
/* TODO: Extend to support 1024 BA Bitmap size */
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BA_MODE,
@@ -2776,7 +3609,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
arvif->vdev_id);
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
- param_value = vif->bss_conf.he_oper.params;
+ param_value = info->he_oper.params;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
@@ -2866,14 +3699,14 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
}
if (changed & BSS_CHANGED_MCAST_RATE &&
- !ath12k_mac_vif_chan(arvif->vif, &def)) {
+ !ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) {
band = def.chan->band;
- mcast_rate = vif->bss_conf.mcast_rate[band];
+ mcast_rate = info->mcast_rate[band];
if (mcast_rate > 0)
rateidx = mcast_rate - 1;
else
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ rateidx = ffs(info->basic_rates) - 1;
if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
@@ -2910,8 +3743,8 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
}
if (changed & BSS_CHANGED_BASIC_RATES &&
- !ath12k_mac_vif_chan(arvif->vif, &def))
- ath12k_recalculate_mgmt_rate(ar, vif, &def);
+ !ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))
+ ath12k_recalculate_mgmt_rate(ar, arvif, &def);
if (changed & BSS_CHANGED_TWT) {
if (info->twt_requester || info->twt_responder)
@@ -2956,27 +3789,216 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
if (changed & BSS_CHANGED_PS &&
ar->ab->hw_params->supports_sta_ps) {
- arvif->ps = vif_cfg->ps;
+ ahvif->ps = vif_cfg->ps;
ath12k_mac_vif_setup_ps(arvif);
}
}
-static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u64 changed)
+static struct ath12k_vif_cache *ath12k_ahvif_get_link_cache(struct ath12k_vif *ahvif,
+ u8 link_id)
+{
+ if (!ahvif->cache[link_id]) {
+ ahvif->cache[link_id] = kzalloc(sizeof(*ahvif->cache[0]), GFP_KERNEL);
+ if (ahvif->cache[link_id])
+ INIT_LIST_HEAD(&ahvif->cache[link_id]->key_conf.list);
+ }
+
+ return ahvif->cache[link_id];
+}
+
+static void ath12k_ahvif_put_link_key_cache(struct ath12k_vif_cache *cache)
+{
+ struct ath12k_key_conf *key_conf, *tmp;
+
+ if (!cache || list_empty(&cache->key_conf.list))
+ return;
+ list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) {
+ list_del(&key_conf->list);
+ kfree(key_conf);
+ }
+}
+
+static void ath12k_ahvif_put_link_cache(struct ath12k_vif *ahvif, u8 link_id)
+{
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return;
+
+ ath12k_ahvif_put_link_key_cache(ahvif->cache[link_id]);
+ kfree(ahvif->cache[link_id]);
+ ahvif->cache[link_id] = NULL;
+}
+
+static void ath12k_mac_op_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_vif_cache *cache;
+ struct ath12k_link_vif *arvif;
+ u8 link_id = info->link_id;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
- mutex_lock(&ar->conf_mutex);
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+
+ /* if the vdev is not created on a certain radio,
+ * cache the info to be updated later on vdev creation
+ */
+
+ if (!arvif || !arvif->is_created) {
+ cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
+ if (!cache)
+ return;
+
+ cache->bss_conf_changed |= changed;
+
+ return;
+ }
+
+ ar = arvif->ar;
ath12k_mac_bss_info_changed(ar, arvif, info, changed);
+}
+
+static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
+ struct ieee80211_vif *vif,
+ u8 link_id)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
+ int i;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
- mutex_unlock(&ar->conf_mutex);
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ if (arvif)
+ return arvif;
+
+ if (!vif->valid_links) {
+		/* Use deflink for non-ML VIFs and mark the link id as 0 */
+ link_id = 0;
+ arvif = &ahvif->deflink;
+ } else {
+		/* If this is the first link arvif being created for an ML VIF,
+		 * use the preallocated deflink memory, except for scan arvifs
+ */
+ if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
+ arvif = &ahvif->deflink;
+ } else {
+ arvif = (struct ath12k_link_vif *)
+ kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL);
+ if (!arvif)
+ return NULL;
+ }
+ }
+
+ arvif->ahvif = ahvif;
+ arvif->link_id = link_id;
+ ahvif->links_map |= BIT(link_id);
+
+ INIT_LIST_HEAD(&arvif->list);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath12k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ /* Allocate Default Queue now and reassign during actual vdev create */
+ vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ rcu_assign_pointer(ahvif->link[arvif->link_id], arvif);
+ synchronize_rcu();
+ return arvif;
+}
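
The branch structure above encodes a small allocation policy: non-ML vifs always use the embedded deflink as link 0; on an ML vif the first real link also reuses deflink, while every later link and the dedicated scan link get their own allocation. A compilable sketch of just that decision; the scan link id 15 is taken from the comments later in this hunk:

	#include <stdbool.h>
	#include <stdio.h>

	#define DEFAULT_SCAN_LINK 15	/* ATH12K_DEFAULT_SCAN_LINK */

	/* returns true when the embedded deflink may be reused, false when
	 * a separate ath12k_link_vif must be allocated
	 */
	static bool use_deflink(bool ml_vif, unsigned long links_map,
				unsigned int link_id)
	{
		if (!ml_vif)
			return true;			/* non-ML: always deflink */
		if (!links_map && link_id != DEFAULT_SCAN_LINK)
			return true;			/* first real ML link */
		return false;				/* later links, scan link */
	}

	int main(void)
	{
		printf("non-ML vif          -> %s\n",
		       use_deflink(false, 0, 0) ? "deflink" : "alloc");
		printf("first ML link       -> %s\n",
		       use_deflink(true, 0, 1) ? "deflink" : "alloc");
		printf("second ML link      -> %s\n",
		       use_deflink(true, 1UL << 1, 2) ? "deflink" : "alloc");
		printf("scan link on ML vif -> %s\n",
		       use_deflink(true, 0, DEFAULT_SCAN_LINK) ? "deflink" : "alloc");
		return 0;
	}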
+
+static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_hw *ah = ahvif->ah;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ rcu_assign_pointer(ahvif->link[arvif->link_id], NULL);
+ synchronize_rcu();
+ ahvif->links_map &= ~BIT(arvif->link_id);
+
+ if (arvif != &ahvif->deflink)
+ kfree(arvif);
+ else
+ memset(arvif, 0, sizeof(*arvif));
+}
+
+static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
+ struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)",
+ arvif->vdev_id, arvif->link_id);
+
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d",
+ arvif->vdev_id, arvif->link_id, ret);
+ }
+ ath12k_mac_vdev_delete(ar, arvif);
+}
+
+static struct ath12k*
+ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 center_freq)
+{
+ struct ath12k_hw *ah = hw->priv;
+ enum nl80211_band band;
+ struct ath12k *ar;
+ int i;
+
+ if (ah->num_radio == 1)
+ return ah->radio;
+
+ /* Currently mac80211 supports splitting scan requests into
+ * multiple scan requests per band.
+	 * Loop through the first channel and determine the scan radio.
+	 * TODO: There could be 5 GHz low/high channels; in that case,
+	 * split the hw request and perform multiple scans.
+ */
+
+ if (center_freq < ATH12K_MIN_5G_FREQ)
+ band = NL80211_BAND_2GHZ;
+ else if (center_freq < ATH12K_MIN_6G_FREQ)
+ band = NL80211_BAND_5GHZ;
+ else
+ band = NL80211_BAND_6GHZ;
+
+ for_each_ar(ah, ar, i) {
+ /* TODO 5 GHz low high split changes */
+ if (ar->mac.sbands[band].channels)
+ return ar;
+ }
+
+ return NULL;
}
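
ath12k_mac_select_scan_device() inspects only the first requested channel, maps its frequency to a band, and then probes each ar for channels in that band. A standalone sketch of the frequency cutoffs; the numeric values of ATH12K_MIN_5G_FREQ and ATH12K_MIN_6G_FREQ below are assumptions, the driver defines the real ones in its headers:

	#include <stdio.h>

	/* assumed cutoffs; the authoritative values live in the driver */
	#define MIN_5G_FREQ 4150
	#define MIN_6G_FREQ 5925

	enum band { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ };

	static enum band freq_to_band(unsigned int center_freq)
	{
		if (center_freq < MIN_5G_FREQ)
			return BAND_2GHZ;
		if (center_freq < MIN_6G_FREQ)
			return BAND_5GHZ;
		return BAND_6GHZ;
	}

	int main(void)
	{
		static const char *names[] = { "2 GHz", "5 GHz", "6 GHz" };

		/* channel 6, channel 36, 6 GHz channel 1 */
		printf("2437 -> %s\n", names[freq_to_band(2437)]);
		printf("5180 -> %s\n", names[freq_to_band(5180)]);
		printf("5955 -> %s\n", names[freq_to_band(5955)]);
		return 0;
	}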
void __ath12k_mac_scan_finish(struct ath12k *ar)
@@ -2994,22 +4016,9 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
ieee80211_remain_on_channel_expired(hw);
fallthrough;
case ATH12K_SCAN_STARTING:
- if (!ar->scan.is_roc) {
- struct cfg80211_scan_info info = {
- .aborted = ((ar->scan.state ==
- ATH12K_SCAN_ABORTING) ||
- (ar->scan.state ==
- ATH12K_SCAN_STARTING)),
- };
-
- ieee80211_scan_completed(hw, &info);
- }
-
- ar->scan.state = ATH12K_SCAN_IDLE;
- ar->scan_channel = NULL;
- ar->scan.roc_freq = 0;
cancel_delayed_work(&ar->scan.timeout);
complete(&ar->scan.completed);
+ wiphy_work_queue(ar->ah->hw->wiphy, &ar->scan.vdev_clean_wk);
break;
}
}
@@ -3029,7 +4038,7 @@ static int ath12k_scan_stop(struct ath12k *ar)
};
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
/* TODO: Fill other STOP Params */
arg.pdev_id = ar->pdev->pdev_id;
@@ -3050,15 +4059,15 @@ static int ath12k_scan_stop(struct ath12k *ar)
}
out:
- /* Scan state should be updated upon scan completion but in case
- * firmware fails to deliver the event (for whatever reason) it is
- * desired to clean up scan state anyway. Firmware may have just
- * dropped the scan completion event delivery due to transport pipe
- * being overflown with data and/or it can recover on its own before
- * next scan request is submitted.
+ /* Scan state should be updated in scan completion worker but in
+ * case firmware fails to deliver the event (for whatever reason)
+ * it is desired to clean up scan state anyway. Firmware may have
+ * just dropped the scan completion event delivery due to transport
+	 * pipe being overrun with data and/or it can recover on its own
+ * before next scan request is submitted.
*/
spin_lock_bh(&ar->data_lock);
- if (ar->scan.state != ATH12K_SCAN_IDLE)
+ if (ret)
__ath12k_mac_scan_finish(ar);
spin_unlock_bh(&ar->data_lock);
@@ -3069,7 +4078,7 @@ static void ath12k_scan_abort(struct ath12k *ar)
{
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
spin_lock_bh(&ar->data_lock);
@@ -3104,9 +4113,56 @@ static void ath12k_scan_timeout_work(struct work_struct *work)
struct ath12k *ar = container_of(work, struct ath12k,
scan.timeout.work);
- mutex_lock(&ar->conf_mutex);
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
ath12k_scan_abort(ar);
- mutex_unlock(&ar->conf_mutex);
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+}
+
+static void ath12k_scan_vdev_clean_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k,
+ scan.vdev_clean_wk);
+ struct ath12k_hw *ah = ar->ah;
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(wiphy);
+
+ arvif = ar->scan.arvif;
+
+ /* The scan vdev has already been deleted. This can occur when a
+ * new scan request is made on the same vif with a different
+ * frequency, causing the scan arvif to move from one radio to
+	 * another. Or the scan was aborted and the arvif was already
+	 * deleted via remove interface. Alternatively, if the scan vdev
+	 * is started and serving as an actual vdev, do not delete it.
+ */
+ if (!arvif || arvif->is_started)
+ goto work_complete;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac clean scan vdev (link id %u)",
+ arvif->link_id);
+
+ ath12k_mac_remove_link_interface(ah->hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
+
+work_complete:
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.arvif = NULL;
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = ((ar->scan.state ==
+ ATH12K_SCAN_ABORTING) ||
+ (ar->scan.state ==
+ ATH12K_SCAN_STARTING)),
+ };
+
+ ieee80211_scan_completed(ar->ah->hw, &info);
+ }
+
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ spin_unlock_bh(&ar->data_lock);
}
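
The split above changes the locking story: __ath12k_mac_scan_finish() runs under ar->data_lock and can no longer call back into mac80211 or tear down vdevs, so it only completes and queues the wiphy work; the worker then finishes the scan with the wiphy lock held. The intended ordering as a runnable storyboard, a sketch of the flow rather than driver code:

	#include <stdio.h>

	/* __ath12k_mac_scan_finish() side: atomic context, data_lock held */
	static void scan_finish_atomic(void)
	{
		printf("1. complete(&ar->scan.completed)\n");
		printf("2. wiphy_work_queue(scan.vdev_clean_wk)\n");
	}

	/* scan.vdev_clean_wk side: process context, wiphy lock held */
	static void scan_vdev_clean_worker(void)
	{
		printf("3. delete the unused scan vdev\n");
		printf("4. ieee80211_scan_completed()\n");
		printf("5. reset ar->scan state to IDLE\n");
	}

	int main(void)
	{
		scan_finish_atomic();
		scan_vdev_clean_worker();
		return 0;
	}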
static int ath12k_start_scan(struct ath12k *ar,
@@ -3114,7 +4170,7 @@ static int ath12k_start_scan(struct ath12k *ar,
{
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ret = ath12k_wmi_send_scan_start_cmd(ar, arg);
if (ret)
@@ -3143,21 +4199,104 @@ static int ath12k_start_scan(struct ath12k *ar,
return 0;
}
+static u8
+ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar)
+{
+ struct ath12k_link_vif *arvif;
+ struct ath12k_hw *ah = ahvif->ah;
+ unsigned long links = ahvif->links_map;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+
+ if (!arvif || !arvif->is_created)
+ continue;
+
+ if (ar == arvif->ar)
+ return link_id;
+ }
+
+	/* The input ar is not assigned to any of the links of the ML VIF;
+	 * use the scan link (15) for scan vdev creation.
+ */
+ return ATH12K_DEFAULT_SCAN_LINK;
+}
+
static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
struct cfg80211_scan_request *req = &hw_req->req;
- struct ath12k_wmi_scan_req_arg arg = {};
+ struct ath12k_wmi_scan_req_arg *arg = NULL;
+ u8 link_id;
int ret;
int i;
+ bool create = true;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ arvif = &ahvif->deflink;
+
+ /* Since the targeted scan device could depend on the frequency
+ * requested in the hw_req, select the corresponding radio
+ */
+ ar = ath12k_mac_select_scan_device(hw, vif, hw_req->req.channels[0]->center_freq);
+ if (!ar)
+ return -EINVAL;
+
+	/* Check if any of the links of the ML VIF is already started on
+	 * the radio (ar) corresponding to the given scan frequency and use
+	 * it; if not, use the scan link (link 15) for scan purposes.
+ */
+ link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar);
+ arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac link ID %d selected for scan",
+ arvif->link_id);
+
+ /* If the vif is already assigned to a specific vdev of an ar,
+	 * check whether it is already started; vdevs which are started
+	 * are not allowed to switch to a new radio.
+	 * If the vdev is not started, but was earlier created on a
+	 * different ar, delete that vdev and create a new one. We don't
+	 * delete at scan stop as an optimization to avoid redundant
+	 * delete-creates of the vdev for the same ar, in case requests
+	 * for this vif always stay on the same band.
+ */
+ if (arvif->is_created) {
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
+
+ if (ar != arvif->ar && arvif->is_started)
+ return -EINVAL;
- ar = ath12k_ah_to_ar(ah);
+ if (ar != arvif->ar) {
+ ath12k_mac_remove_link_interface(hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
+ } else {
+ create = false;
+ }
+ }
- mutex_lock(&ar->conf_mutex);
+ if (create) {
+		/* The previous arvif would have been cleared in the radio
+		 * switch block above; assign arvif again for create.
+ */
+ arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+
+ ret = ath12k_mac_vdev_create(ar, arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret);
+ return -EINVAL;
+ }
+ }
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
@@ -3166,7 +4305,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
reinit_completion(&ar->scan.completed);
ar->scan.state = ATH12K_SCAN_STARTING;
ar->scan.is_roc = false;
- ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.arvif = arvif;
ret = 0;
break;
case ATH12K_SCAN_STARTING:
@@ -3180,42 +4319,47 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (ret)
goto exit;
- ath12k_wmi_start_scan_init(ar, &arg);
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH12K_SCAN_ID;
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath12k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH12K_SCAN_ID;
if (req->ie_len) {
- arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
- if (!arg.extraie.ptr) {
+ arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
+ if (!arg->extraie.ptr) {
ret = -ENOMEM;
goto exit;
}
- arg.extraie.len = req->ie_len;
+ arg->extraie.len = req->ie_len;
}
if (req->n_ssids) {
- arg.num_ssids = req->n_ssids;
- for (i = 0; i < arg.num_ssids; i++)
- arg.ssid[i] = req->ssids[i];
+ arg->num_ssids = req->n_ssids;
+ for (i = 0; i < arg->num_ssids; i++)
+ arg->ssid[i] = req->ssids[i];
} else {
- arg.scan_f_passive = 1;
+ arg->scan_f_passive = 1;
}
if (req->n_channels) {
- arg.num_chan = req->n_channels;
- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
- GFP_KERNEL);
-
- if (!arg.chan_list) {
+ arg->num_chan = req->n_channels;
+ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
+ GFP_KERNEL);
+ if (!arg->chan_list) {
ret = -ENOMEM;
goto exit;
}
- for (i = 0; i < arg.num_chan; i++)
- arg.chan_list[i] = req->channels[i]->center_freq;
+ for (i = 0; i < arg->num_chan; i++)
+ arg->chan_list[i] = req->channels[i]->center_freq;
}
- ret = ath12k_start_scan(ar, &arg);
+ ret = ath12k_start_scan(ar, arg);
if (ret) {
ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@@ -3223,18 +4367,26 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
}
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started");
+
+	/* cfg80211/mac80211 allow only one scan at a time. Hence the
+	 * last_scan link id is used to track the link id on which the
+	 * scan is being done on this vif.
+ */
+ ahvif->last_scan_link = arvif->link_id;
+
/* Add a margin to account for event/command processing */
ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
- msecs_to_jiffies(arg.max_scan_time +
+ msecs_to_jiffies(arg->max_scan_time +
ATH12K_MAC_SCAN_TIMEOUT_MSECS));
exit:
- kfree(arg.chan_list);
-
- if (req->ie_len)
- kfree(arg.extraie.ptr);
-
- mutex_unlock(&ar->conf_mutex);
+ if (arg) {
+ kfree(arg->chan_list);
+ kfree(arg->extraie.ptr);
+ kfree(arg);
+ }
return ret;
}
@@ -3242,19 +4394,25 @@ exit:
static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ u16 link_id = ahvif->last_scan_link;
+ struct ath12k_link_vif *arvif;
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || arvif->is_started)
+ return;
+
+ ar = arvif->ar;
- mutex_lock(&ar->conf_mutex);
ath12k_scan_abort(ar);
- mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
}
-static int ath12k_install_key(struct ath12k_vif *arvif,
+static int ath12k_install_key(struct ath12k_link_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
const u8 *macaddr, u32 flags)
@@ -3269,8 +4427,10 @@ static int ath12k_install_key(struct ath12k_vif *arvif,
.key_flags = flags,
.macaddr = macaddr,
};
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
- lockdep_assert_held(&arvif->ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
reinit_completion(&ar->install_key_done);
@@ -3321,13 +4481,13 @@ install:
if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
return -ETIMEDOUT;
- if (ether_addr_equal(macaddr, arvif->vif->addr))
- arvif->key_cipher = key->cipher;
+ if (ether_addr_equal(macaddr, vif->addr))
+ ahvif->key_cipher = key->cipher;
return ar->install_key_status ? -EINVAL : 0;
}
-static int ath12k_clear_peer_keys(struct ath12k_vif *arvif,
+static int ath12k_clear_peer_keys(struct ath12k_link_vif *arvif,
const u8 *addr)
{
struct ath12k *ar = arvif->ar;
@@ -3338,7 +4498,7 @@ static int ath12k_clear_peer_keys(struct ath12k_vif *arvif,
int i;
u32 flags = 0;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, arvif->vdev_id, addr);
@@ -3369,49 +4529,48 @@ static int ath12k_clear_peer_keys(struct ath12k_vif *arvif,
return first_errno;
}
-static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ struct ieee80211_key_conf *key)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_base *ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_sta *sta = NULL;
+ struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
- struct ath12k_sta *arsta;
+ struct ath12k_sta *ahsta;
const u8 *peer_addr;
- int ret = 0;
+ int ret;
u32 flags = 0;
- /* BIP needs to be done in software */
- if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
- return 1;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
+ if (arsta)
+ sta = ath12k_ahsta_to_sta(arsta->ahsta);
- if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
return 1;
- if (key->keyidx > WMI_MAX_KEY_INDEX)
- return -ENOSPC;
-
- mutex_lock(&ar->conf_mutex);
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
if (sta)
- peer_addr = sta->addr;
- else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
- peer_addr = vif->bss_conf.bssid;
+ peer_addr = arsta->addr;
+ else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = link_conf->bssid;
else
- peer_addr = vif->addr;
+ peer_addr = link_conf->addr;
key->hw_key_idx = key->keyidx;
/* the peer should not disappear in mid-way (unless FW goes awry) since
- * we already hold conf_mutex. we just make sure its there now.
+	 * we already hold the wiphy lock. We just make sure it's there now.
*/
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
@@ -3421,14 +4580,13 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (cmd == SET_KEY) {
ath12k_warn(ab, "cannot install key for non-existent peer %pM\n",
peer_addr);
- ret = -EOPNOTSUPP;
- goto exit;
- } else {
- /* if the peer doesn't exist there is no key to disable
- * anymore
- */
- goto exit;
+ return -EOPNOTSUPP;
}
+
+ /* if the peer doesn't exist there is no key to disable
+ * anymore
+ */
+ return 0;
}
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
@@ -3439,13 +4597,13 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
ath12k_warn(ab, "ath12k_install_key failed (%d)\n", ret);
- goto exit;
+ return ret;
}
ret = ath12k_dp_rx_peer_pn_replay_config(arvif, peer_addr, cmd, key);
if (ret) {
ath12k_warn(ab, "failed to offload PN replay detection %d\n", ret);
- goto exit;
+ return ret;
}
spin_lock_bh(&ab->base_lock);
@@ -3470,7 +4628,7 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr);
if (sta) {
- arsta = ath12k_sta_to_arsta(sta);
+ ahsta = ath12k_sta_to_ahsta(sta);
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
@@ -3479,21 +4637,152 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (cmd == SET_KEY)
- arsta->pn_type = HAL_PN_TYPE_WPA;
+ ahsta->pn_type = HAL_PN_TYPE_WPA;
else
- arsta->pn_type = HAL_PN_TYPE_NONE;
+ ahsta->pn_type = HAL_PN_TYPE_NONE;
break;
default:
- arsta->pn_type = HAL_PN_TYPE_NONE;
+ ahsta->pn_type = HAL_PN_TYPE_NONE;
break;
}
}
spin_unlock_bh(&ab->base_lock);
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
+}
+
+static int ath12k_mac_update_key_cache(struct ath12k_vif_cache *cache,
+ enum set_key_cmd cmd,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath12k_key_conf *key_conf, *tmp;
+
+ list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) {
+ if (key_conf->key != key)
+ continue;
+
+ /* If SET key entry is already present in cache, nothing to do,
+ * just return
+ */
+ if (cmd == SET_KEY)
+ return 0;
+
+		/* DEL key for an old SET key which the driver hasn't flushed yet */
+ list_del(&key_conf->list);
+ kfree(key_conf);
+ }
+
+ if (cmd == SET_KEY) {
+ key_conf = kzalloc(sizeof(*key_conf), GFP_KERNEL);
+
+ if (!key_conf)
+ return -ENOMEM;
+
+ key_conf->cmd = cmd;
+ key_conf->sta = sta;
+ key_conf->key = key;
+ list_add_tail(&key_conf->list,
+ &cache->key_conf.list);
+ }
+
+ return 0;
+}
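
The cache discipline above is: a repeated SET for a key that is already pending is a no-op, a DEL cancels a still-pending SET, and a new SET is queued so it can be replayed once the link vdev is created (the replay itself is not part of this hunk). A compilable userspace sketch of that list behaviour on a hand-rolled singly linked list:

	#include <stdio.h>
	#include <stdlib.h>

	struct cached_key {
		int key;		/* stand-in for ieee80211_key_conf * */
		struct cached_key *next;
	};

	/* same rules as ath12k_mac_update_key_cache(); at most one
	 * pending entry per key is expected
	 */
	static void update_cache(struct cached_key **head, int key, int set)
	{
		for (struct cached_key **p = head; *p; p = &(*p)->next) {
			if ((*p)->key != key)
				continue;
			if (set)
				return;		/* SET already pending: no-op */
			struct cached_key *dead = *p;
			*p = dead->next;	/* DEL cancels the pending SET */
			free(dead);
			return;
		}
		if (set) {			/* queue a new SET for replay */
			struct cached_key *e = calloc(1, sizeof(*e));
			if (!e)
				return;
			e->key = key;
			e->next = *head;
			*head = e;
		}
	}

	int main(void)
	{
		struct cached_key *cache = NULL;

		update_cache(&cache, 42, 1);	/* queued */
		update_cache(&cache, 42, 1);	/* duplicate SET: ignored */
		update_cache(&cache, 42, 0);	/* DEL cancels it */
		printf("cache %s\n", cache ? "non-empty" : "empty");
		return 0;
	}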
+
+static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta = NULL;
+ struct ath12k_vif_cache *cache;
+ struct ath12k_sta *ahsta;
+ unsigned long links;
+ u8 link_id;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) {
+ return 1;
+ }
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ if (sta) {
+ ahsta = ath12k_sta_to_ahsta(sta);
+
+		/* For an ML STA the pairwise key is the same for all
+		 * associated link stations, hence set the key for all
+		 * active link STAs.
+		 */
+ if (sta->mlo) {
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy,
+ ahvif->link[link_id]);
+ arsta = wiphy_dereference(hw->wiphy,
+ ahsta->link[link_id]);
+
+ if (WARN_ON(!arvif || !arsta))
+ /* arvif and arsta are expected to be valid when
+ * STA is present.
+ */
+ continue;
+
+ ret = ath12k_mac_set_key(arvif->ar, cmd, arvif,
+ arsta, key);
+ if (ret)
+ break;
+ }
+
+ return 0;
+ }
+
+ arsta = &ahsta->deflink;
+ arvif = arsta->arvif;
+ if (WARN_ON(!arvif))
+ return -EINVAL;
+
+ ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
+ if (ret)
+ return ret;
+
+ return 0;
+ }
+
+ if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
+ link_id = key->link_id;
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ } else {
+ link_id = 0;
+ arvif = &ahvif->deflink;
+ }
+
+ if (!arvif || !arvif->is_created) {
+ cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
+ if (!cache)
+ return -ENOSPC;
+
+ ret = ath12k_mac_update_key_cache(cache, cmd, sta, key);
+ if (ret)
+ return ret;
+
+ return 0;
+ }
+
+ ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int
@@ -3511,8 +4800,8 @@ ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar,
}
static int
-ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif,
- struct ieee80211_sta *sta,
+ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
const struct cfg80211_bitrate_mask *mask,
enum nl80211_band band)
{
@@ -3521,7 +4810,7 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif,
u32 rate_code;
int ret, i;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
nss = 0;
@@ -3534,61 +4823,76 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif,
if (!nss) {
ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
- sta->addr);
+ arsta->addr);
return -EINVAL;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
- sta->addr);
+ arsta->addr);
rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1,
WMI_RATE_PREAMBLE_VHT);
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
rate_code);
if (ret)
ath12k_warn(ar->ab,
"failed to update STA %pM Fixed Rate %d: %d\n",
- sta->addr, rate_code, ret);
+ arsta->addr, rate_code, ret);
return ret;
}
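
The single-rate path collapses the bitrate mask into one WMI rate code built from MCS, NSS and preamble. A sketch of the packing; the bit layout and the VHT preamble value are assumptions based on the ATH12K_HW_RATE_CODE()/wmi_rate_preamble definitions elsewhere in the driver and should be checked against the headers:

	#include <stdio.h>

	/* assumed layout of ATH12K_HW_RATE_CODE(): preamble<<8 | nss<<5 | mcs */
	#define HW_RATE_CODE(mcs, nss, preamble) \
		(((preamble) << 8) | ((nss) << 5) | (mcs))

	#define RATE_PREAMBLE_VHT 3	/* assumed wmi_rate_preamble value */

	int main(void)
	{
		/* VHT MCS 7, 2 spatial streams: nss is encoded as nss - 1 */
		unsigned int code = HW_RATE_CODE(7, 2 - 1, RATE_PREAMBLE_VHT);

		printf("fixed rate code 0x%03x\n", code);	/* 0x327 */
		return 0;
	}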
-static int ath12k_station_assoc(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- bool reassoc)
+static int ath12k_mac_station_assoc(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ bool reassoc)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k_wmi_peer_assoc_arg peer_arg;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ieee80211_link_sta *link_sta;
int ret;
struct cfg80211_chan_def def;
enum nl80211_band band;
struct cfg80211_bitrate_mask *mask;
u8 num_vht_rates;
+ u8 link_id = arvif->link_id;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return -EPERM;
+ if (WARN_ON(!rcu_access_pointer(sta->link[link_id])))
+ return -EINVAL;
+
band = def.chan->band;
mask = &arvif->bitrate_mask;
- ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+ struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+ kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+ if (!peer_arg)
+ return -ENOMEM;
+
+ ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, reassoc);
- ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (peer_arg->peer_nss < 1) {
+ ath12k_warn(ar->ab,
+ "invalid peer NSS %d\n", peer_arg->peer_nss);
+ return -EINVAL;
+ }
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
return ret;
}
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
return -ETIMEDOUT;
}
@@ -3599,8 +4903,14 @@ static int ath12k_station_assoc(struct ath12k *ar,
* fixed param.
* Note that all other rates and NSS will be disabled for this peer.
*/
- if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
- ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in station assoc\n");
+ return -EINVAL;
+ }
+
+ if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
if (ret)
return ret;
@@ -3612,8 +4922,8 @@ static int ath12k_station_assoc(struct ath12k *ar,
if (reassoc)
return 0;
- ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->deflink.ht_cap);
+ ret = ath12k_setup_peer_smps(ar, arvif, arsta->addr,
+ &link_sta->ht_cap, &link_sta->he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3628,10 +4938,10 @@ static int ath12k_station_assoc(struct ath12k *ar,
}
if (sta->wme && sta->uapsd_queues) {
- ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta);
+ ret = ath12k_peer_assoc_qos_ap(ar, arvif, arsta);
if (ret) {
ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
return ret;
}
}
@@ -3639,36 +4949,27 @@ static int ath12k_station_assoc(struct ath12k *ar,
return 0;
}
-static int ath12k_station_disassoc(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int ath12k_mac_station_disassoc(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- int ret;
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (!sta->wme) {
arvif->num_legacy_stations--;
- ret = ath12k_recalc_rtscts_prot(arvif);
- if (ret)
- return ret;
+ return ath12k_recalc_rtscts_prot(arvif);
}
- ret = ath12k_clear_peer_keys(arvif, sta->addr);
- if (ret) {
- ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
return 0;
}
-static void ath12k_sta_rc_update_wk(struct work_struct *wk)
+static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
+ struct ieee80211_link_sta *link_sta;
struct ath12k *ar;
- struct ath12k_vif *arvif;
- struct ath12k_sta *arsta;
+ struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
@@ -3677,15 +4978,19 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk)
u32 changed, bw, nss, smps, bw_prev;
int err, num_vht_rates;
const struct cfg80211_bitrate_mask *mask;
- struct ath12k_wmi_peer_assoc_arg peer_arg;
enum wmi_phy_mode peer_phymode;
+ struct ath12k_link_sta *arsta;
+ struct ieee80211_vif *vif;
+
+ lockdep_assert_wiphy(wiphy);
- arsta = container_of(wk, struct ath12k_sta, update_wk);
- sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arsta = container_of(wk, struct ath12k_link_sta, update_wk);
+ sta = ath12k_ahsta_to_sta(arsta->ahsta);
arvif = arsta->arvif;
+ vif = ath12k_ahvif_to_vif(arvif->ahvif);
ar = arvif->ar;
- if (WARN_ON(ath12k_mac_vif_chan(arvif->vif, &def)))
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
band = def.chan->band;
@@ -3704,15 +5009,18 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk)
spin_unlock_bh(&ar->data_lock);
- mutex_lock(&ar->conf_mutex);
-
nss = max_t(u32, 1, nss);
nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask),
ath12k_mac_max_vht_nss(vht_mcs_mask)));
+ struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+ kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+ if (!peer_arg)
+ return;
+
if (changed & IEEE80211_RC_BW_CHANGED) {
- ath12k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg);
- peer_phymode = peer_arg.peer_phymode;
+ ath12k_peer_assoc_h_phymode(ar, arvif, arsta, peer_arg);
+ peer_phymode = peer_arg->peer_phymode;
if (bw > bw_prev) {
/* Phymode shows maximum supported channel width, if we
@@ -3721,65 +5029,65 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk)
* WMI_PEER_CHWIDTH
*/
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n",
- sta->addr, bw, bw_prev);
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arsta->addr, bw, bw_prev);
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_PHYMODE,
peer_phymode);
if (err) {
ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
- sta->addr, peer_phymode, err);
- goto err_rc_bw_changed;
+ arsta->addr, peer_phymode, err);
+ return;
}
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_CHWIDTH,
bw);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n",
- sta->addr, bw, err);
+ arsta->addr, bw, err);
} else {
/* When we downgrade bandwidth this will conflict with phymode
* and cause to trigger firmware crash. In this case we send
* WMI_PEER_CHWIDTH followed by WMI_PEER_PHYMODE
*/
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n",
- sta->addr, bw, bw_prev);
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arsta->addr, bw, bw_prev);
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_CHWIDTH,
bw);
if (err) {
ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n",
- sta->addr, bw, err);
- goto err_rc_bw_changed;
+ arsta->addr, bw, err);
+ return;
}
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_PHYMODE,
peer_phymode);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
- sta->addr, peer_phymode, err);
+ arsta->addr, peer_phymode, err);
}
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
- sta->addr, nss);
+ arsta->addr, nss);
- err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_NSS, nss);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
- sta->addr, nss, err);
+ arsta->addr, nss, err);
}
if (changed & IEEE80211_RC_SMPS_CHANGED) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
- sta->addr, smps);
+ arsta->addr, smps);
- err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE, smps);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
- sta->addr, smps, err);
+ arsta->addr, smps, err);
}
if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
@@ -3798,39 +5106,74 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk)
* TODO: Check RATEMASK_CMDID to support auto rates selection
* across HT/VHT and for multiple VHT MCS support.
*/
- if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
- ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
} else {
/* If the peer is non-VHT or no fixed VHT rate
* is provided in the new bitrate mask we set the
* other rates using peer_assoc command.
*/
- ath12k_peer_assoc_prepare(ar, arvif->vif, sta,
- &peer_arg, true);
+ ath12k_peer_assoc_prepare(ar, arvif, arsta,
+ peer_arg, true);
- err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ err = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (err)
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
- sta->addr, arvif->vdev_id, err);
+ arsta->addr, arvif->vdev_id, err);
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
}
}
-err_rc_bw_changed:
- mutex_unlock(&ar->conf_mutex);
}
-static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif,
- struct ieee80211_sta *sta)
+static void ath12k_mac_free_unassign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ u8 link_id)
{
+ struct ath12k_link_sta *arsta;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return;
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (WARN_ON(!arsta))
+ return;
+
+ ahsta->links_map &= ~BIT(link_id);
+ rcu_assign_pointer(ahsta->link[link_id], NULL);
+ synchronize_rcu();
+
+ if (arsta == &ahsta->deflink) {
+ arsta->link_id = ATH12K_INVALID_LINK_ID;
+ arsta->ahsta = NULL;
+ arsta->arvif = NULL;
+ return;
+ }
+
+ kfree(arsta);
+}
+
+static int ath12k_mac_inc_num_stations(struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k *ar = arvif->ar;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return 0;
if (ar->num_stations >= ar->max_num_stations)
@@ -3841,38 +5184,176 @@ static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif,
return 0;
}
-static void ath12k_mac_dec_num_stations(struct ath12k_vif *arvif,
- struct ieee80211_sta *sta)
+static void ath12k_mac_dec_num_stations(struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k *ar = arvif->ar;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return;
ar->num_stations--;
}
+static void ath12k_mac_station_post_remove(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k_peer *peer;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ ath12k_mac_dec_num_stations(arvif, arsta);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer && peer->sta == sta) {
+ ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+}
+
+static int ath12k_mac_station_unauthorize(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ath12k_peer *peer;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ /* Driver must clear the keys during the state change from
+ * IEEE80211_STA_AUTHORIZED to IEEE80211_STA_ASSOC, since after
+ * returning from here, mac80211 is going to delete the keys
+ * in __sta_info_destroy_part2(). This will ensure that the driver does
+ * not retain stale key references after mac80211 deletes the keys.
+ */
+ ret = ath12k_clear_peer_keys(arvif, arsta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_station_authorize(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ath12k_peer *peer;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret) {
+ ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ arsta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_station_remove(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ int ret = 0;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ wiphy_work_cancel(ar->ah->hw->wiphy, &arsta->update_wk);
+
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (sta->mlo)
+ return ret;
+
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+
+ ath12k_mac_station_post_remove(ar, arvif, arsta);
+
+ if (sta->valid_links)
+ ath12k_mac_free_unassign_link_sta(ahvif->ah,
+ arsta->ahsta, arsta->link_id);
+
+ return ret;
+}
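
All of the helpers above hang off mac80211's station state ladder; ath12k_mac_handle_link_sta_state() at the end of this hunk dispatches on the (old_state, new_state) pair. A compact sketch of the transitions this patch handles; the assoc/disassoc steps are inferred from the helpers above and the handler is only partially visible here:

	#include <stdio.h>

	enum sta_state {		/* mirrors ieee80211_sta_state ordering */
		STA_NOTEXIST,
		STA_NONE,
		STA_AUTH,
		STA_ASSOC,
		STA_AUTHORIZED,
	};

	static const char *action(enum sta_state old, enum sta_state new)
	{
		if (old == STA_NOTEXIST && new == STA_NONE)
			return "station add (peer create, DP setup)";
		if (old == STA_NONE && new == STA_NOTEXIST)
			return "station remove (peer delete, free link sta)";
		if (old == STA_AUTH && new == STA_ASSOC)
			return "station assoc (WMI peer assoc)";
		if (old == STA_ASSOC && new == STA_AUTH)
			return "station disassoc";
		if (old == STA_ASSOC && new == STA_AUTHORIZED)
			return "authorize (WMI_PEER_AUTHORIZE)";
		if (old == STA_AUTHORIZED && new == STA_ASSOC)
			return "unauthorize (clear peer keys)";
		return "no driver action";
	}

	int main(void)
	{
		printf("NOTEXIST -> NONE:    %s\n", action(STA_NOTEXIST, STA_NONE));
		printf("AUTHORIZED -> ASSOC: %s\n", action(STA_AUTHORIZED, STA_ASSOC));
		return 0;
	}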
+
static int ath12k_mac_station_add(struct ath12k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
{
struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
- struct ath12k_wmi_peer_create_arg peer_param;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k_wmi_peer_create_arg peer_param = {0};
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- ret = ath12k_mac_inc_num_stations(arvif, sta);
+ ret = ath12k_mac_inc_num_stations(arvif, arsta);
if (ret) {
ath12k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
ar->max_num_stations);
goto exit;
}
-
arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
if (!arsta->rx_stats) {
ret = -ENOMEM;
@@ -3880,40 +5361,41 @@ static int ath12k_mac_station_add(struct ath12k *ar,
}
peer_param.vdev_id = arvif->vdev_id;
- peer_param.peer_addr = sta->addr;
+ peer_param.peer_addr = arsta->addr;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ peer_param.ml_enabled = sta->mlo;
ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
if (ret) {
ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
goto free_peer;
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
if (ieee80211_vif_is_mesh(vif)) {
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_USE_4ADDR, 1);
if (ret) {
ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
- sta->addr, ret);
+ arsta->addr, ret);
goto free_peer;
}
}
- ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, arsta->addr);
if (ret) {
ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
goto free_peer;
}
if (ab->hw_params->vdev_start_delay &&
!arvif->is_started &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ arvif->ahvif->vdev_type != WMI_VDEV_TYPE_AP) {
ret = ath12k_start_vdev_delay(ar, arvif);
if (ret) {
ath12k_warn(ab, "failed to delay vdev start: %d\n", ret);
@@ -3924,9 +5406,11 @@ static int ath12k_mac_station_add(struct ath12k *ar,
return 0;
free_peer:
- ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
dec_num_station:
- ath12k_mac_dec_num_stations(arvif, sta);
+ ath12k_mac_dec_num_stations(arvif, arsta);
exit:
return ret;
}
@@ -3962,81 +5446,131 @@ static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
return bw;
}
-static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
+static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ struct ath12k_link_sta *arsta,
+ struct ath12k_vif *ahvif,
+ u8 link_id)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ieee80211_link_sta *link_sta;
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!arsta || link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return -EINVAL;
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ if (!arvif)
+ return -EINVAL;
+
+ memset(arsta, 0, sizeof(*arsta));
+
+ link_sta = wiphy_dereference(ah->hw->wiphy, sta->link[link_id]);
+ if (!link_sta)
+ return -EINVAL;
+
+ ether_addr_copy(arsta->addr, link_sta->addr);
+
+ /* logical index of the link sta in order of creation */
+ arsta->link_idx = ahsta->num_peer++;
+
+ arsta->link_id = link_id;
+ ahsta->links_map |= BIT(arsta->link_id);
+ arsta->arvif = arvif;
+ arsta->ahsta = ahsta;
+ ahsta->ahvif = ahvif;
+
+ wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk);
+
+ rcu_assign_pointer(ahsta->link[link_id], arsta);
+
+ return 0;
+}
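
Aside: the links_map bookkeeping used above is an ordinary bitmap keyed by
link id: a bit is set when a link sta is assigned and cleared when it is
freed, and for_each_set_bit() walks the assigned links. A minimal
standalone sketch of the same pattern (names and sizes here are
illustrative stand-ins, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_NUM_LINKS 15 /* stand-in for IEEE80211_MLD_MAX_NUM_LINKS */

    int main(void)
    {
            uint16_t links_map = 0;

            /* assign link stas on links 0 and 2: set their bits */
            links_map |= 1U << 0;
            links_map |= 1U << 2;

            /* walk all assigned links, as for_each_set_bit() does */
            for (int link_id = 0; link_id < MAX_NUM_LINKS; link_id++)
                    if (links_map & (1U << link_id))
                            printf("link %d assigned\n", link_id);

            /* unassign link 0: clear its bit */
            links_map &= ~(1U << 0);

            return 0;
    }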
+
+static void ath12k_mac_ml_station_remove(struct ath12k_vif *ahvif,
+ struct ath12k_sta *ahsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ath12k_hw *ah = ahvif->ah;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long links;
struct ath12k *ar;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
- struct ath12k_peer *peer;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ ath12k_peer_mlo_link_peers_delete(ahvif, ahsta);
+
+ /* validate link station removal and clear arsta links */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+
+ ath12k_mac_station_post_remove(ar, arvif, arsta);
+
+ ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+ }
+
+ ath12k_peer_ml_delete(ah, sta);
+}
+
+static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k *ar = arvif->ar;
int ret = 0;
- /* cancel must be done outside the mutex to avoid deadlock */
- if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST))
- cancel_work_sync(&arsta->update_wk);
+ lockdep_assert_wiphy(hw->wiphy);
- ar = ath12k_ah_to_ar(ah);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
+ arsta->link_id, arsta->addr, old_state, new_state);
- mutex_lock(&ar->conf_mutex);
+ /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station
+ * from driver
+ */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ret = ath12k_mac_station_remove(ar, arvif, arsta);
+ if (ret) {
+ ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ goto exit;
+ }
+ }
+ /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE: Add new station to driver */
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
- memset(arsta, 0, sizeof(*arsta));
- arsta->arvif = arvif;
- INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk);
-
- ret = ath12k_mac_station_add(ar, vif, sta);
+ ret = ath12k_mac_station_add(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- } else if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
- ath12k_bss_disassoc(ar, arvif);
- ret = ath12k_mac_vdev_stop(arvif);
- if (ret)
- ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
- }
- ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
-
- ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
-
- ath12k_mac_dec_num_stations(arvif, sta);
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer && peer->sta == sta) {
- ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
- vif->addr, arvif->vdev_id);
- peer->sta = NULL;
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
- }
- spin_unlock_bh(&ar->ab->base_lock);
+ arsta->addr, arvif->vdev_id);
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
+ /* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send station assoc command for
+ * peer associated to AP/Mesh/ADHOC vif type.
+ */
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath12k_station_assoc(ar, vif, sta, false);
+ ret = ath12k_mac_station_assoc(ar, arvif, arsta, false);
if (ret)
ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
- sta->addr);
+ arsta->addr);
spin_lock_bh(&ar->data_lock);
@@ -4044,46 +5578,153 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
arsta->bw_prev = sta->deflink.bandwidth;
spin_unlock_bh(&ar->data_lock);
+
+ /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as
+ * authorized
+ */
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = true;
-
- spin_unlock_bh(&ar->ab->base_lock);
+ ret = ath12k_mac_station_authorize(ar, arvif, arsta);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to authorize station: %pM\n",
+ arsta->addr);
- if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_AUTHORIZE,
- 1);
- if (ret)
- ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
- sta->addr, arvif->vdev_id, ret);
- }
+ /* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: the station may be
+ * in the process of being removed; deauthorize it.
+ */
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
- spin_lock_bh(&ar->ab->base_lock);
+ ath12k_mac_station_unauthorize(ar, arvif, arsta);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = false;
-
- spin_unlock_bh(&ar->ab->base_lock);
+ /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTH: disassoc peer connected to
+ * AP/mesh/ADHOC vif type.
+ */
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath12k_station_disassoc(ar, vif, sta);
+ ret = ath12k_mac_station_disassoc(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
- sta->addr);
+ arsta->addr);
}
- mutex_unlock(&ar->conf_mutex);
+exit:
+ return ret;
+}
+
+static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long valid_links;
+ u8 link_id = 0;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (ieee80211_vif_is_mld(vif) && sta->valid_links) {
+ WARN_ON(!sta->mlo && hweight16(sta->valid_links) != 1);
+ link_id = ffs(sta->valid_links) - 1;
+ }
+
+ /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE:
+ * New station add received. If this is an ML station then
+ * ahsta->links_map will be zero and sta->valid_links will have
+ * exactly one bit set. Assign the default link to the first link sta.
+ */
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(ahsta, 0, sizeof(*ahsta));
+
+ arsta = &ahsta->deflink;
+
+ /* ML sta */
+ if (sta->mlo && !ahsta->links_map &&
+ (hweight16(sta->valid_links) == 1)) {
+ ret = ath12k_peer_ml_create(ah, sta);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable to create ML peer for sta %pM",
+ sta->addr);
+ goto exit;
+ }
+ }
+
+ ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif,
+ link_id);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable assign link %d for sta %pM",
+ link_id, sta->addr);
+ goto exit;
+ }
+
+ /* arsta is zeroed in ath12k_mac_assign_link_sta() above, hence set
+ * these fields only after the link sta has been assigned
+ */
+ if (sta->mlo) {
+ arsta->is_assoc_link = true;
+ ahsta->assoc_link_id = link_id;
+ }
+ }
+
+ /* In the ML station scenario, activate all partner links once the
+ * client is transitioning to the associated state.
+ *
+ * FIXME: Ideally, this activation should occur when the client
+ * transitions to the authorized state. However, there are some
+ * issues with handling this in the firmware. Until the firmware
+ * can manage it properly, activate the links when the client is
+ * about to move to the associated state.
+ */
+ if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION &&
+ old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC)
+ ieee80211_set_active_links(vif, ieee80211_vif_usable_links(vif));
+
+ /* Handle all the other state transitions in generic way */
+ valid_links = ahsta->links_map;
+ for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]);
+ /* a set links_map bit must have both an arvif and an arsta */
+ if (WARN_ON(!arvif || !arsta))
+ continue;
+
+ /* vdev might have been deleted */
+ if (WARN_ON(!arvif->ar))
+ continue;
+
+ ret = ath12k_mac_handle_link_sta_state(hw, arvif, arsta,
+ old_state, new_state);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable to move link sta %d of sta %pM from state %d to %d",
+ link_id, arsta->addr, old_state, new_state);
+ goto exit;
+ }
+ }
+
+ /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST:
+ * Remove the station from the driver (the ML sta is handled here
+ * since it needs special handling; a normal sta was already handled
+ * by the generic handler above).
+ */
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST && sta->mlo)
+ ath12k_mac_ml_station_remove(ahvif, ahsta);
+
+ ret = 0;
+
+exit:
+ /* update the state if everything went well */
+ if (!ret)
+ ahsta->state = new_state;
return ret;
}
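
Aside: mac80211 moves a station one step at a time along the ladder
NOTEXIST <-> NONE <-> AUTH <-> ASSOC <-> AUTHORIZED, which is why the
handlers above only match adjacent state pairs. A small standalone sketch
of that invariant (the enum mirrors the ordering of
enum ieee80211_sta_state; the helper itself is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    enum sta_state { NOTEXIST, NONE, AUTH, ASSOC, AUTHORIZED };

    static const char *names[] = {
            "NOTEXIST", "NONE", "AUTH", "ASSOC", "AUTHORIZED"
    };

    /* mac80211 only requests transitions between adjacent states */
    static int valid_transition(enum sta_state old, enum sta_state new)
    {
            return abs((int)new - (int)old) == 1;
    }

    int main(void)
    {
            printf("%s -> %s: %s\n", names[NONE], names[AUTH],
                   valid_transition(NONE, AUTH) ? "ok" : "rejected");
            printf("%s -> %s: %s\n", names[NONE], names[AUTHORIZED],
                   valid_transition(NONE, AUTHORIZED) ? "ok" : "rejected");
            return 0;
    }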
@@ -4092,28 +5733,41 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k *ar;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ u8 link_id;
int ret;
s16 txpwr;
+ lockdep_assert_wiphy(hw->wiphy);
+
+ /* TODO: use link id from mac80211 once that's implemented */
+ link_id = 0;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]);
+
if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
txpwr = 0;
} else {
txpwr = sta->deflink.txpwr.power;
- if (!txpwr)
- return -EINVAL;
+ if (!txpwr) {
+ ret = -EINVAL;
+ goto out;
+ }
}
- if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
- return -EINVAL;
-
- ar = ath12k_ah_to_ar(ah);
+ if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) {
+ ret = -EINVAL;
+ goto out;
+ }
- mutex_lock(&ar->conf_mutex);
+ ar = arvif->ar;
- ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_USE_FIXED_PWR, txpwr);
if (ret) {
ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
@@ -4122,40 +5776,72 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
}
out:
- mutex_unlock(&ar->conf_mutex);
return ret;
}
-static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- u32 changed)
+static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ieee80211_sta *sta = link_sta->sta;
struct ath12k *ar;
- struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_sta *arsta;
+ struct ath12k_link_vif *arvif;
struct ath12k_peer *peer;
u32 bw, smps;
- ar = ath12k_ah_to_ar(ah);
+ rcu_read_lock();
+ arvif = rcu_dereference(ahvif->link[link_sta->link_id]);
+ if (!arvif) {
+ ath12k_hw_warn(ah, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n",
+ link_sta->link_id, sta->addr);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar = arvif->ar;
+ arsta = rcu_dereference(ahsta->link[link_sta->link_id]);
+ if (!arsta) {
+ rcu_read_unlock();
+ ath12k_warn(ar->ab, "mac sta rc update failed to fetch link sta on link id %u for peer %pM\n",
+ link_sta->link_id, sta->addr);
+ return;
+ }
spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
+ rcu_read_unlock();
ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
return;
}
spin_unlock_bh(&ar->ab->base_lock);
+ if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ rcu_read_unlock();
+ return;
+ }
+
+ link_sta = rcu_dereference(sta->link[arsta->link_id]);
+ if (!link_sta) {
+ rcu_read_unlock();
+ ath12k_warn(ar->ab, "unable to access link sta in rc update for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
- sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss,
- sta->deflink.smps_mode);
+ arsta->addr, changed, link_sta->bandwidth, link_sta->rx_nss,
+ link_sta->smps_mode);
spin_lock_bh(&ar->data_lock);
@@ -4166,12 +5852,12 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
}
if (changed & IEEE80211_RC_NSS_CHANGED)
- arsta->nss = sta->deflink.rx_nss;
+ arsta->nss = link_sta->rx_nss;
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
- switch (sta->deflink.smps_mode) {
+ switch (link_sta->smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
@@ -4183,8 +5869,8 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
smps = WMI_PEER_SMPS_DYNAMIC;
break;
default:
- ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
- sta->deflink.smps_mode, sta->addr);
+ ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM link %u\n",
+ link_sta->smps_mode, arsta->addr, link_sta->link_id);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -4196,17 +5882,124 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
- ieee80211_queue_work(hw, &arsta->update_wk);
+ wiphy_work_queue(hw->wiphy, &arsta->update_wk);
+
+ rcu_read_unlock();
}
-static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif,
+static struct ath12k_link_sta *ath12k_mac_alloc_assign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ struct ath12k_vif *ahvif,
+ u8 link_id)
+{
+ struct ath12k_link_sta *arsta;
+ int ret;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (arsta)
+ return NULL;
+
+ arsta = kmalloc(sizeof(*arsta), GFP_KERNEL);
+ if (!arsta)
+ return NULL;
+
+ ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif, link_id);
+ if (ret) {
+ kfree(arsta);
+ return NULL;
+ }
+
+ return arsta;
+}
+
+static int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long valid_links;
+ struct ath12k *ar;
+ u8 link_id;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (!sta->valid_links)
+ return -EINVAL;
+
+ /* Firmware does not support removing individual link stas. All link
+ * stas are removed during ML STA delete in sta_state(), hence link
+ * sta removal is not handled here.
+ */
+ if (new_links < old_links)
+ return 0;
+
+ if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+ ath12k_hw_warn(ah, "unable to add link for ml sta %pM", sta->addr);
+ return -EINVAL;
+ }
+
+ /* this op is expected only after initial sta insertion with default link */
+ if (WARN_ON(ahsta->links_map == 0))
+ return -EINVAL;
+
+ valid_links = new_links;
+ for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (ahsta->links_map & BIT(link_id))
+ continue;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = ath12k_mac_alloc_assign_link_sta(ah, ahsta, ahvif, link_id);
+
+ if (!arvif || !arsta) {
+ ath12k_hw_warn(ah, "Failed to alloc/assign link sta");
+ continue;
+ }
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ret = ath12k_mac_station_add(ar, arvif, arsta);
+ if (ret) {
+ ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
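
Aside: since removed links are rejected early, the loop above effectively
walks new_links & ~ahsta->links_map, i.e. only the links being added. A
minimal sketch of that set arithmetic (the values are made up for
illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t old_links = 0x0005; /* links 0 and 2 already exist */
            uint16_t new_links = 0x0007; /* mac80211 now wants links 0..2 */
            uint16_t added = new_links & ~old_links;

            for (int id = 0; id < 16; id++)
                    if (added & (1U << id))
                            printf("add link sta on link %d\n", id);

            return 0;
    }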
+
+static bool ath12k_mac_op_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links)
+{
+ /* TODO: Handle recovery case */
+
+ return true;
+}
+
+static int ath12k_conf_tx_uapsd(struct ath12k_link_vif *arvif,
u16 ac, bool enable)
{
struct ath12k *ar = arvif->ar;
+ struct ath12k_vif *ahvif = arvif->ahvif;
u32 value;
int ret;
- if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ if (ahvif->vdev_type != WMI_VDEV_TYPE_STA)
return 0;
switch (ac) {
@@ -4229,19 +6022,19 @@ static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif,
}
if (enable)
- arvif->u.sta.uapsd |= value;
+ ahvif->u.sta.uapsd |= value;
else
- arvif->u.sta.uapsd &= ~value;
+ ahvif->u.sta.uapsd &= ~value;
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
WMI_STA_PS_PARAM_UAPSD,
- arvif->u.sta.uapsd);
+ ahvif->u.sta.uapsd);
if (ret) {
ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret);
goto exit;
}
- if (arvif->u.sta.uapsd)
+ if (ahvif->u.sta.uapsd)
value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
else
value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
@@ -4256,8 +6049,7 @@ exit:
return ret;
}
-static int ath12k_mac_conf_tx(struct ath12k_vif *arvif,
- unsigned int link_id, u16 ac,
+static int ath12k_mac_conf_tx(struct ath12k_link_vif *arvif, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct wmi_wmm_params_arg *p = NULL;
@@ -4265,7 +6057,7 @@ static int ath12k_mac_conf_tx(struct ath12k_vif *arvif,
struct ath12k_base *ab = ar->ab;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
switch (ac) {
case IEEE80211_AC_VO:
@@ -4314,16 +6106,30 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_vif_cache *cache;
int ret;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return -EINVAL;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || !arvif->is_created) {
+ cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
+ if (!cache)
+ return -ENOSPC;
+
+ cache->tx_conf.changed = true;
+ cache->tx_conf.ac = ac;
+ cache->tx_conf.tx_queue_params = *params;
- mutex_lock(&ar->conf_mutex);
- ret = ath12k_mac_conf_tx(arvif, link_id, ac, params);
- mutex_unlock(&ar->conf_mutex);
+ return 0;
+ }
+
+ ret = ath12k_mac_conf_tx(arvif, ac, params);
return ret;
}
@@ -4394,10 +6200,11 @@ ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask
return ht_cap;
}
-static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif)
+static int ath12k_mac_set_txbf_conf(struct ath12k_link_vif *arvif)
{
u32 value = 0;
struct ath12k *ar = arvif->ar;
+ struct ath12k_vif *ahvif = arvif->ahvif;
int nsts;
int sound_dim;
u32 vht_cap = ar->pdev->cap.vht_cap;
@@ -4425,7 +6232,7 @@ static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif)
value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
- arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ ahvif->vdev_type == WMI_VDEV_TYPE_AP)
value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
}
@@ -4433,7 +6240,7 @@ static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif)
value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
- arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA)
value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
}
@@ -4987,9 +6794,10 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
{
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (ath12k_check_chain_mask(ar, tx_ant, true))
return -EINVAL;
@@ -4997,11 +6805,18 @@ static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
if (ath12k_check_chain_mask(ar, rx_ant, false))
return -EINVAL;
+ /* Since we advertised the max cap of all radios combined during wiphy
+ * registration, ensure we don't set the antenna config higher than the
+ * limits
+ */
+ tx_ant = min_t(u32, tx_ant, ar->pdev->cap.tx_chain_mask);
+ rx_ant = min_t(u32, rx_ant, ar->pdev->cap.rx_chain_mask);
+
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
- if (ar->state != ATH12K_STATE_ON &&
- ar->state != ATH12K_STATE_RESTARTED)
+ if (ah->state != ATH12K_HW_STATE_ON &&
+ ah->state != ATH12K_HW_STATE_RESTARTED)
return 0;
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
@@ -5035,6 +6850,8 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
{
int num_mgmt;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -5086,7 +6903,7 @@ static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
return 0;
}
-static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif,
+static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct sk_buff *skb)
{
struct ath12k_base *ab = ar->ab;
@@ -5096,6 +6913,8 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif,
int buf_id;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ATH12K_SKB_CB(skb)->ar = ar;
spin_lock_bh(&ar->txmgmt_idr_lock);
buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
@@ -5150,14 +6969,18 @@ static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar)
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
-static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
+static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
+ struct ath12k_hw *ah = ar->ah;
struct ath12k_skb_cb *skb_cb;
- struct ath12k_vif *arvif;
+ struct ath12k_vif *ahvif;
+ struct ath12k_link_vif *arvif;
struct sk_buff *skb;
int ret;
+ lockdep_assert_wiphy(wiphy);
+
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
skb_cb = ATH12K_SKB_CB(skb);
if (!skb_cb->vif) {
@@ -5166,8 +6989,16 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
continue;
}
- arvif = ath12k_vif_to_arvif(skb_cb->vif);
+ ahvif = ath12k_vif_to_ahvif(skb_cb->vif);
+ if (!(ahvif->links_map & BIT(skb_cb->link_id))) {
+ ath12k_warn(ar->ab,
+ "invalid linkid %u in mgmt over wmi tx with linkmap 0x%x\n",
+ skb_cb->link_id, ahvif->links_map);
+ ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+ continue;
+ }
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[skb_cb->link_id]);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
@@ -5177,8 +7008,9 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
} else {
ath12k_warn(ar->ab,
- "dropping mgmt frame for vdev %d, is_started %d\n",
+ "dropping mgmt frame for vdev %d link %u is_started %d\n",
arvif->vdev_id,
+ skb_cb->link_id,
arvif->is_started);
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
@@ -5212,7 +7044,7 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
+ wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
return 0;
}
@@ -5222,22 +7054,121 @@ static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
struct sk_buff *skb,
bool is_prb_rsp)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
if (likely(!is_prb_rsp))
return;
spin_lock_bh(&ar->data_lock);
- if (arvif->u.ap.noa_data &&
- !pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+ if (ahvif->u.ap.noa_data &&
+ !pskb_expand_head(skb, 0, ahvif->u.ap.noa_len,
GFP_ATOMIC))
- skb_put_data(skb, arvif->u.ap.noa_data,
- arvif->u.ap.noa_len);
+ skb_put_data(skb, ahvif->u.ap.noa_data,
+ ahvif->u.ap.noa_len);
spin_unlock_bh(&ar->data_lock);
}
+/* Note: called under rcu_read_lock() */
+static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ u8 link, struct sk_buff *skb, u32 info_flags)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ieee80211_link_sta *link_sta;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath12k_sta *ahsta;
+
+ /* Use the link id passed or the default vif link */
+ if (!sta) {
+ if (link != IEEE80211_LINK_UNSPECIFIED)
+ return link;
+
+ return ahvif->deflink.link_id;
+ }
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+
+ /* The translation below ensures we pass proper A2 & A3 for non-ML
+ * clients. For now it also assumes that only an MLO AP is supported
+ * in this path.
+ */
+ if (!sta->mlo) {
+ link = ahsta->deflink.link_id;
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return link;
+
+ bss_conf = rcu_dereference(vif->link_conf[link]);
+ if (bss_conf) {
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+ if (!ieee80211_has_tods(hdr->frame_control) &&
+ !ieee80211_has_fromds(hdr->frame_control))
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+ }
+
+ return link;
+ }
+
+ /* enqueue eth encap & data frames on the primary link; FW does link
+ * selection and address translation.
+ */
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP ||
+ ieee80211_is_data(hdr->frame_control))
+ return ahsta->assoc_link_id;
+
+ /* 802.11 frame cases */
+ if (link == IEEE80211_LINK_UNSPECIFIED)
+ link = ahsta->deflink.link_id;
+
+ if (!ieee80211_is_mgmt(hdr->frame_control))
+ return link;
+
+ /* Perform address conversion for ML STA Tx */
+ bss_conf = rcu_dereference(vif->link_conf[link]);
+ link_sta = rcu_dereference(sta->link[link]);
+
+ if (bss_conf && link_sta) {
+ ether_addr_copy(hdr->addr1, link_sta->addr);
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+
+ return link;
+ }
+
+ if (bss_conf) {
+ /* In certain cases an ML sta associates with only a subset of the
+ * links on which the ML AP is active, but later sends a frame
+ * (e.g. a probe request) on a different link that is active in our
+ * MLD yet was not added during association. We can still honor the
+ * Tx to that ML STA via the requested link.
+ * Control reaches here in such a case only when that link address
+ * equals the MLD address, or when the client wrongly used the MLD
+ * address as TA, which is what allowed the ML sta object to be
+ * identified and passed in here.
+ * If the link address of that STA differed from the MLD address,
+ * the sta object would be NULL and control would already have
+ * returned at the !sta check at the start of the function. No
+ * translation of hdr->addr1 from MLD to link address is needed
+ * either, since the RA is already the MLD address (ideally the
+ * same as that link address).
+ */
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+ }
+
+ return link;
+}
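
Aside: condensed, the tx-link selection above is a short decision tree:
no sta -> requested link or the vif deflink; non-ML sta -> its deflink
(with A2/A3 rewritten); ML sta data or ethernet-encap frames -> the
primary (assoc) link; ML sta 802.11 management -> the requested link with
MLD-to-link address translation. A standalone sketch of just the link
choice, omitting the address rewriting (all names are illustrative
stand-ins, not the driver's structures):

    #include <stdbool.h>
    #include <stdio.h>

    #define LINK_UNSPECIFIED 0xff

    static int pick_tx_link(bool has_sta, bool sta_is_mlo, bool is_data,
                            int requested, int deflink, int assoc_link)
    {
            if (!has_sta)
                    return requested != LINK_UNSPECIFIED ? requested : deflink;

            if (!sta_is_mlo)
                    return deflink;

            /* data goes via the primary link; FW picks the real link */
            if (is_data)
                    return assoc_link;

            return requested != LINK_UNSPECIFIED ? requested : deflink;
    }

    int main(void)
    {
            printf("mgmt to ML sta -> link %d\n",
                   pick_tx_link(true, true, false, 1, 0, 0));
            printf("data to ML sta -> link %d\n",
                   pick_tx_link(true, true, true, LINK_UNSPECIFIED, 0, 2));
            return 0;
    }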
+
+/* Note: called under rcu_read_lock() */
static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -5245,14 +7176,18 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k *ar = arvif->ar;
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif = &ahvif->deflink;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_sta *sta = control->sta;
u32 info_flags = info->flags;
+ struct ath12k *ar;
bool is_prb_rsp;
+ u8 link_id;
int ret;
+ link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
memset(skb_cb, 0, sizeof(*skb_cb));
skb_cb->vif = vif;
@@ -5261,6 +7196,27 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
}
+ /* handle only for MLO case, use deflink for non MLO case */
+ if (ieee80211_vif_is_mld(vif)) {
+ link_id = ath12k_mac_get_tx_link(sta, vif, link_id, skb, info_flags);
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ } else {
+ link_id = 0;
+ }
+
+ arvif = rcu_dereference(ahvif->link[link_id]);
+ if (!arvif || !arvif->ar) {
+ ath12k_warn(ahvif->ah, "failed to find arvif link id %u for frame transmission",
+ link_id);
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ ar = arvif->ar;
+ skb_cb->link_id = link_id;
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
@@ -5288,10 +7244,12 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
void ath12k_mac_drain_tx(struct ath12k *ar)
{
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
/* make sure rcu-protected mac80211 tx path itself is drained */
synchronize_net();
- cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
ath12k_mgmt_over_wmi_tx_purge(ar);
}
@@ -5301,51 +7259,15 @@ static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
/* TODO: Need to support new monitor mode */
}
-static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
-{
- int recovery_start_count;
-
- if (!ab->is_reset)
- return;
-
- recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
-
- if (recovery_start_count == ab->num_radios) {
- complete(&ab->recovery_start);
- ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n");
- }
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n");
-
- wait_for_completion_timeout(&ab->reconfigure_complete,
- ATH12K_RECONFIGURE_TIMEOUT_HZ);
-}
-
static int ath12k_mac_start(struct ath12k *ar)
{
+ struct ath12k_hw *ah = ar->ah;
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev = ar->pdev;
int ret;
- mutex_lock(&ar->conf_mutex);
-
- switch (ar->state) {
- case ATH12K_STATE_OFF:
- ar->state = ATH12K_STATE_ON;
- break;
- case ATH12K_STATE_RESTARTING:
- ar->state = ATH12K_STATE_RESTARTED;
- ath12k_mac_wait_reconfigure(ab);
- break;
- case ATH12K_STATE_RESTARTED:
- case ATH12K_STATE_WEDGED:
- case ATH12K_STATE_ON:
- WARN_ON(1);
- ret = -EINVAL;
- goto err;
- }
+ lockdep_assert_held(&ah->hw_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
1, pdev->pdev_id);
@@ -5430,36 +7352,74 @@ static int ath12k_mac_start(struct ath12k *ar)
}
}
- mutex_unlock(&ar->conf_mutex);
-
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
&ab->pdevs[ar->pdev_idx]);
return 0;
err:
- ar->state = ATH12K_STATE_OFF;
- mutex_unlock(&ar->conf_mutex);
return ret;
}
+static void ath12k_drain_tx(struct ath12k_hw *ah)
+{
+ struct ath12k *ar;
+ int i;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ for_each_ar(ah, ar, i)
+ ath12k_mac_drain_tx(ar);
+}
+
static int ath12k_mac_op_start(struct ieee80211_hw *hw)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
- struct ath12k_base *ab = ar->ab;
- int ret;
+ struct ath12k *ar;
+ int ret, i;
- ath12k_mac_drain_tx(ar);
+ lockdep_assert_wiphy(hw->wiphy);
- ret = ath12k_mac_start(ar);
- if (ret) {
- ath12k_err(ab, "fail to start mac operations in pdev idx %d ret %d\n",
- ar->pdev_idx, ret);
- return ret;
+ ath12k_drain_tx(ah);
+
+ guard(mutex)(&ah->hw_mutex);
+
+ switch (ah->state) {
+ case ATH12K_HW_STATE_OFF:
+ ah->state = ATH12K_HW_STATE_ON;
+ break;
+ case ATH12K_HW_STATE_RESTARTING:
+ ah->state = ATH12K_HW_STATE_RESTARTED;
+ break;
+ case ATH12K_HW_STATE_RESTARTED:
+ case ATH12K_HW_STATE_WEDGED:
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_OFF;
+
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_mac_start(ar);
+ if (ret) {
+ ah->state = ATH12K_HW_STATE_OFF;
+
+ ath12k_err(ar->ab, "fail to start mac operations in pdev idx %d ret %d\n",
+ ar->pdev_idx, ret);
+ goto fail_start;
+ }
}
return 0;
+
+fail_start:
+ for (; i > 0; i--) {
+ ar = ath12k_ah_to_ar(ah, i - 1);
+ ath12k_mac_stop(ar);
+ }
+
+ return ret;
}
int ath12k_mac_rfkill_config(struct ath12k *ar)
@@ -5521,20 +7481,22 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable)
static void ath12k_mac_stop(struct ath12k *ar)
{
+ struct ath12k_hw *ah = ar->ah;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ah->hw_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ret = ath12k_mac_config_mon_status_default(ar, false);
if (ret && (ret != -EOPNOTSUPP))
ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
ret);
- clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
- ar->state = ATH12K_STATE_OFF;
- mutex_unlock(&ar->conf_mutex);
+ clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
cancel_delayed_work_sync(&ar->scan.timeout);
+ wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->scan.vdev_clean_wk);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->rfkill_work);
@@ -5552,18 +7514,28 @@ static void ath12k_mac_stop(struct ath12k *ar)
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
-static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
- ath12k_mac_drain_tx(ar);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ath12k_drain_tx(ah);
+
+ mutex_lock(&ah->hw_mutex);
- ath12k_mac_stop(ar);
+ ah->state = ATH12K_HW_STATE_OFF;
+
+ for_each_ar(ah, ar, i)
+ ath12k_mac_stop(ar);
+
+ mutex_unlock(&ah->hw_mutex);
}
static u8
-ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
+ath12k_mac_get_vdev_stats_id(struct ath12k_link_vif *arvif)
{
struct ath12k_base *ab = arvif->ar->ab;
u8 vdev_stats_id = 0;
@@ -5585,17 +7557,73 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
return vdev_stats_id;
}
-static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
- struct ath12k_wmi_vdev_create_arg *arg)
+static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
+ u32 *flags, u32 *tx_vdev_id)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif;
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_link_vif *tx_arvif;
+ struct ath12k_vif *tx_ahvif;
+
+ if (!tx_vif)
+ return 0;
+
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in set mbssid params for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
+ tx_ahvif = ath12k_vif_to_ahvif(tx_vif);
+ tx_arvif = &tx_ahvif->deflink;
+
+ if (link_conf->nontransmitted) {
+ if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+ return -EINVAL;
+
+ *flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP;
+ *tx_vdev_id = tx_arvif->vdev_id;
+ } else if (tx_arvif == arvif) {
+ *flags = WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP;
+ } else {
+ return -EINVAL;
+ }
+
+ if (link_conf->ema_ap)
+ *flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE;
+
+ return 0;
+}
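
Aside: the helper above maps the MBSSID role of the link (nontransmitting
profile, transmitting AP, optionally EMA) onto WMI vdev flags. A minimal
sketch of that mapping (the flag values here are illustrative
placeholders, not the real WMI constants):

    #include <stdint.h>
    #include <stdio.h>

    #define MBSSID_FLAGS_NON_TX_AP 0x1
    #define MBSSID_FLAGS_TX_AP     0x2
    #define MBSSID_FLAGS_EMA_MODE  0x4

    static int mbssid_flags(int nontransmitted, int is_tx_vif, int ema_ap,
                            uint32_t *flags)
    {
            if (nontransmitted)
                    *flags = MBSSID_FLAGS_NON_TX_AP;
            else if (is_tx_vif)
                    *flags = MBSSID_FLAGS_TX_AP;
            else
                    return -1; /* invalid combination */

            if (ema_ap)
                    *flags |= MBSSID_FLAGS_EMA_MODE;
            return 0;
    }

    int main(void)
    {
            uint32_t flags;

            if (!mbssid_flags(1, 0, 1, &flags))
                    printf("flags 0x%x\n", flags);
            return 0;
    }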
+
+static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif,
+ struct ath12k_wmi_vdev_create_arg *arg)
{
struct ath12k *ar = arvif->ar;
struct ath12k_pdev *pdev = ar->pdev;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
arg->if_id = arvif->vdev_id;
- arg->type = arvif->vdev_type;
- arg->subtype = arvif->vdev_subtype;
+ arg->type = ahvif->vdev_type;
+ arg->subtype = ahvif->vdev_subtype;
arg->pdev_id = pdev->pdev_id;
+ arg->mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+ arg->mbssid_tx_vdev_id = 0;
+ if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath12k_mac_setup_vdev_params_mbssid(arvif,
+ &arg->mbssid_flags,
+ &arg->mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
+
if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
@@ -5611,6 +7639,18 @@ static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
}
arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
+
+ if (ath12k_mac_is_ml_arvif(arvif)) {
+ if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS) {
+ ath12k_warn(ar->ab, "too many MLO links during setting up vdev: %d",
+ ahvif->vif->valid_links);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(arg->mld_addr, ahvif->vif->addr);
+ }
+
+ return 0;
}
static u32
@@ -5648,14 +7688,15 @@ ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype)
}
static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
- struct ath12k_vif *arvif)
+ struct ath12k_link_vif *arvif)
{
u32 param_id, param_value;
struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *ahvif = arvif->ahvif;
int ret;
param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
- param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
+ param_value = ath12k_mac_prepare_he_mode(ar->pdev, ahvif->vif->type);
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
@@ -5678,9 +7719,10 @@ static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
return ret;
}
-static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif)
+static void ath12k_mac_update_vif_offload(struct ath12k_link_vif *arvif)
{
- struct ieee80211_vif *vif = arvif->vif;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
u32 param_id, param_value;
@@ -5693,14 +7735,14 @@ static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif)
IEEE80211_OFFLOAD_DECAP_ENABLED);
if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
- arvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
+ ahvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
- arvif->tx_encap_type = ATH12K_HW_TXRX_RAW;
+ ahvif->tx_encap_type = ATH12K_HW_TXRX_RAW;
else
- arvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI;
+ ahvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- param_id, arvif->tx_encap_type);
+ param_id, ahvif->tx_encap_type);
if (ret) {
ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
arvif->vdev_id, ret);
@@ -5727,121 +7769,125 @@ static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif)
static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
+ unsigned long links;
+ int link_id;
- ath12k_mac_update_vif_offload(arvif);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (vif->valid_links) {
+ links = vif->valid_links;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!(arvif && arvif->ar))
+ continue;
+
+ ath12k_mac_update_vif_offload(arvif);
+ }
+
+ return;
+ }
+
+ ath12k_mac_update_vif_offload(&ahvif->deflink);
}
-static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_base *ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_hw *ah = ar->ah;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ah->hw;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
- struct ath12k_wmi_peer_create_arg peer_param;
+ struct ath12k_wmi_peer_create_arg peer_param = {0};
+ struct ieee80211_bss_conf *link_conf;
u32 param_id, param_value;
u16 nss;
int i;
- int ret;
- int bit;
-
- vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
-
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
-
- mutex_lock(&ar->conf_mutex);
-
- if (vif->type == NL80211_IFTYPE_AP &&
- ar->num_peers > (ar->max_num_peers - 1)) {
- ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
- ret = -ENOBUFS;
- goto err;
- }
+ int ret, vdev_id;
+ u8 link_id;
- if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
- ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
- TARGET_NUM_VDEVS);
- ret = -EBUSY;
- goto err;
- }
-
- memset(arvif, 0, sizeof(*arvif));
-
- arvif->ar = ar;
- arvif->vif = vif;
-
- INIT_LIST_HEAD(&arvif->list);
+ lockdep_assert_wiphy(hw->wiphy);
- /* Should we initialize any worker to handle connection loss indication
- * from firmware in sta mode?
+ /* If no link is active and a scan vdev is requested, use a default
+ * link conf for the scan address.
+ */
+ if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK && vif->valid_links)
+ link_id = ffs(vif->valid_links) - 1;
+ else
+ link_id = arvif->link_id;
- for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
- arvif->bitrate_mask.control[i].legacy = 0xffffffff;
- memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].ht_mcs));
- memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
}
- bit = __ffs64(ab->free_vdev_map);
+ memcpy(arvif->bssid, link_conf->addr, ETH_ALEN);
- arvif->vdev_id = bit;
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+ arvif->ar = ar;
+ vdev_id = __ffs64(ab->free_vdev_map);
+ arvif->vdev_id = vdev_id;
+ ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
switch (vif->type) {
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
- arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ ahvif->vdev_type = WMI_VDEV_TYPE_STA;
if (vif->p2p)
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+ ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
break;
case NL80211_IFTYPE_MESH_POINT:
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
+ ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
fallthrough;
case NL80211_IFTYPE_AP:
- arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ ahvif->vdev_type = WMI_VDEV_TYPE_AP;
if (vif->p2p)
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+ ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
break;
case NL80211_IFTYPE_MONITOR:
- arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
- ar->monitor_vdev_id = bit;
+ ahvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ ar->monitor_vdev_id = vdev_id;
break;
case NL80211_IFTYPE_P2P_DEVICE:
- arvif->vdev_type = WMI_VDEV_TYPE_STA;
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ ahvif->vdev_type = WMI_VDEV_TYPE_STA;
+ ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
break;
default:
WARN_ON(1);
break;
}
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n",
- arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n",
+ arvif->vdev_id, ahvif->vdev_type, ahvif->vdev_subtype,
ab->free_vdev_map);
vif->cab_queue = arvif->vdev_id % (ATH12K_HW_MAX_QUEUES - 1);
for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1);
- ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+ ret = ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to create vdev parameters %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
- ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg);
+ ret = ath12k_wmi_vdev_create(ar, arvif->bssid, &vdev_arg);
if (ret) {
ath12k_warn(ab, "failed to create WMI vdev %d: %d\n",
arvif->vdev_id, ret);
- goto err;
+ return ret;
}
ar->num_created_vdevs++;
+ arvif->is_created = true;
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
vif->addr, arvif->vdev_id);
ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
@@ -5862,10 +7908,10 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_vdev_del;
}
- switch (arvif->vdev_type) {
+ switch (ahvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
peer_param.vdev_id = arvif->vdev_id;
- peer_param.peer_addr = vif->addr;
+ peer_param.peer_addr = arvif->bssid;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
ret = ath12k_peer_create(ar, arvif, NULL, &peer_param);
if (ret) {
@@ -5923,7 +7969,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
break;
}
- arvif->txpower = vif->bss_conf.txpower;
+ arvif->txpower = link_conf->txpower;
ret = ath12k_mac_txpower_recalc(ar);
if (ret)
goto err_peer_del;
@@ -5938,30 +7984,27 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
}
ath12k_dp_vdev_tx_attach(ar, arvif);
-
if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
ath12k_mac_monitor_vdev_create(ar);
- mutex_unlock(&ar->conf_mutex);
-
return ret;
err_peer_del:
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
reinit_completion(&ar->peer_delete_done);
- ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr,
+ ret = ath12k_wmi_send_peer_delete_cmd(ar, arvif->bssid,
arvif->vdev_id);
if (ret) {
ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
- arvif->vdev_id, vif->addr);
+ arvif->vdev_id, arvif->bssid);
goto err;
}
ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
- vif->addr);
+ arvif->bssid);
if (ret)
- goto err;
+ goto err_vdev_del;
ar->num_peers--;
}
@@ -5969,6 +8012,8 @@ err_peer_del:
err_vdev_del:
ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->num_created_vdevs--;
+ arvif->is_created = false;
+ arvif->ar = NULL;
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
@@ -5977,11 +8022,217 @@ err_vdev_del:
spin_unlock_bh(&ar->data_lock);
err:
- mutex_unlock(&ar->conf_mutex);
-
+ arvif->ar = NULL;
return ret;
}
+static void ath12k_mac_vif_flush_key_cache(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_key_conf *key_conf, *tmp;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_hw *ah = ahvif->ah;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+ struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id];
+ int ret;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) {
+ arsta = NULL;
+ if (key_conf->sta) {
+ ahsta = ath12k_sta_to_ahsta(key_conf->sta);
+ arsta = wiphy_dereference(ah->hw->wiphy,
+ ahsta->link[arvif->link_id]);
+ if (!arsta)
+ goto free_cache;
+ }
+
+ ret = ath12k_mac_set_key(arvif->ar, key_conf->cmd,
+ arvif, arsta,
+ key_conf->key);
+ if (ret)
+ ath12k_warn(arvif->ar->ab, "unable to apply set key param to vdev %d ret %d\n",
+ arvif->vdev_id, ret);
+free_cache:
+ list_del(&key_conf->list);
+ kfree(key_conf);
+ }
+}
+
+static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id];
+ struct ath12k_base *ab = ar->ab;
+
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!cache)
+ return;
+
+ if (cache->tx_conf.changed) {
+ ret = ath12k_mac_conf_tx(arvif, cache->tx_conf.ac,
+ &cache->tx_conf.tx_queue_params);
+ if (ret)
+ ath12k_warn(ab,
+ "unable to apply tx config parameters to vdev %d\n",
+ ret);
+ }
+
+ if (cache->bss_conf_changed) {
+ ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf,
+ cache->bss_conf_changed);
+ }
+
+ if (!list_empty(&cache->key_conf.list))
+ ath12k_mac_vif_flush_key_cache(arvif);
+
+ ath12k_ahvif_put_link_cache(ahvif, arvif->link_id);
+}
+
+static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
+ struct ath12k_link_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ath12k_link_vif *scan_arvif;
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+ u8 link_id = arvif->link_id;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (ah->num_radio == 1)
+ ar = ah->radio;
+ else if (ctx)
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ else
+ return NULL;
+
+ if (!ar)
+ return NULL;
+
+ /* clean up the scan vdev if the scan on that ar is done and we now
+ * want to create a vdev for actual usage.
+ */
+ if (ieee80211_vif_is_mld(vif)) {
+ scan_arvif = wiphy_dereference(hw->wiphy,
+ ahvif->link[ATH12K_DEFAULT_SCAN_LINK]);
+ if (scan_arvif && scan_arvif->ar == ar) {
+ ar->scan.arvif = NULL;
+ ath12k_mac_remove_link_interface(hw, scan_arvif);
+ ath12k_mac_unassign_link_vif(scan_arvif);
+ }
+ }
+
+ if (arvif->ar) {
+ /* This is not expected really */
+ if (WARN_ON(!arvif->is_created)) {
+ arvif->ar = NULL;
+ return NULL;
+ }
+
+ if (ah->num_radio == 1)
+ return arvif->ar;
+
+ /* This can happen as scan vdev gets created during multiple scans
+ * across different radios before a vdev is brought up in
+ * a certain radio.
+ */
+ if (ar != arvif->ar) {
+ if (WARN_ON(arvif->is_started))
+ return NULL;
+
+ ath12k_mac_remove_link_interface(hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
+ }
+ }
+
+ ab = ar->ab;
+
+ /* Assign arvif again here since previous radio switch block
+ * would've unassigned and cleared it.
+ */
+ arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ar->num_peers > (ar->max_num_peers - 1)) {
+ ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+ goto unlock;
+ }
+
+ if (arvif->is_created)
+ goto flush;
+
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
+ TARGET_NUM_VDEVS);
+ goto unlock;
+ }
+
+ ret = ath12k_mac_vdev_create(ar, arvif);
+ if (ret) {
+ ath12k_warn(ab, "failed to create vdev %pM ret %d", vif->addr, ret);
+ goto unlock;
+ }
+
+flush:
+ /* If the vdev was created during channel assign and not during
+ * add_interface(), apply any parameters for the vdev which were
+ * received after add_interface(), corresponding to this vif.
+ */
+ ath12k_mac_vif_cache_flush(ar, arvif);
+unlock:
+ return arvif->ar;
+}
+
+static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
+ int i;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ memset(ahvif, 0, sizeof(*ahvif));
+
+ ahvif->ah = ah;
+ ahvif->vif = vif;
+ arvif = &ahvif->deflink;
+ arvif->ahvif = ahvif;
+
+ INIT_LIST_HEAD(&arvif->list);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath12k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ /* Allocate Default Queue now and reassign during actual vdev create */
+ vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+ /* Defer vdev creation until assign_chanctx or hw_scan is initiated, as
+ * the driver will not know at this point whether this interface is an
+ * ML vif.
+ */
+ return 0;
+}
+
static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif)
{
struct ath12k_tx_desc_info *tx_desc_info;
@@ -6007,30 +8258,15 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif
}
}
-static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ath12k_link_vif *arvif)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k_base *ab;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ath12k_base *ab = ar->ab;
unsigned long time_left;
int ret;
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
-
- mutex_lock(&ar->conf_mutex);
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
- arvif->vdev_id);
-
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
- if (ret)
- ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
- arvif->vdev_id, ret);
- }
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
reinit_completion(&ar->vdev_delete_done);
@@ -6048,18 +8284,17 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
goto err_vdev_del;
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ab->free_vdev_map |= 1LL << arvif->vdev_id;
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ar->num_created_vdevs--;
+
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ar->monitor_vdev_id = -1;
ar->monitor_vdev_created = false;
} else if (ar->monitor_vdev_created && !ar->monitor_started) {
ret = ath12k_mac_monitor_vdev_delete(ar);
}
- ab->free_vdev_map |= 1LL << (arvif->vdev_id);
- ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
- ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
- ar->num_created_vdevs--;
-
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
vif->addr, arvif->vdev_id);
@@ -6069,6 +8304,7 @@ err_vdev_del:
spin_unlock_bh(&ar->data_lock);
ath12k_peer_cleanup(ar, arvif->vdev_id);
+ ath12k_ahvif_put_link_cache(ahvif, arvif->link_id);
idr_for_each(&ar->txmgmt_idr,
ath12k_mac_vif_txmgmt_idr_remove, vif);
@@ -6078,11 +8314,61 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath12k_mac_txpower_recalc(ar);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
/* TODO: recalc traffic pause state based on the available vdevs */
+ arvif->is_created = false;
+ arvif->ar = NULL;
- mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
+ struct ath12k *ar;
+ u8 link_id;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ for (link_id = 0; link_id < ATH12K_NUM_MAX_LINKS; link_id++) {
+ /* if we cached some config but never received assign chanctx,
+ * free the allocated cache.
+ */
+ ath12k_ahvif_put_link_cache(ahvif, link_id);
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || !arvif->is_created)
+ continue;
+
+ ar = arvif->ar;
+
+ /* A scan abort is already in progress here, since cancel_hw_scan()
+ * is expected to have been executed before this. Since the link is
+ * going to be removed now anyway, just cancel the worker and report
+ * the scan as aborted to user space.
+ */
+ if (ar->scan.arvif == arvif) {
+ wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.arvif = NULL;
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(ar->ah->hw, &info);
+ }
+
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ ath12k_mac_remove_link_interface(hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
+ }
}
/* FIXME: Has to be verified. */
@@ -6101,7 +8387,7 @@ static void ath12k_mac_configure_filter(struct ath12k *ar,
bool reset_flag;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ar->filter_flags = total_flags;
@@ -6109,15 +8395,9 @@ static void ath12k_mac_configure_filter(struct ath12k *ar,
reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
- if (!ret) {
- if (!reset_flag)
- set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- else
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- } else {
+ if (ret)
ath12k_warn(ar->ab,
"fail to set monitor filter: %d\n", ret);
- }
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"total_flags:0x%x, reset_flag:%d\n",
@@ -6132,29 +8412,30 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
- mutex_lock(&ar->conf_mutex);
+ ar = ath12k_ah_to_ar(ah, 0);
*total_flags &= SUPPORTED_FILTERS;
ath12k_mac_configure_filter(ar, *total_flags);
-
- mutex_unlock(&ar->conf_mutex);
}
static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ int antennas_rx = 0, antennas_tx = 0;
struct ath12k *ar;
+ int i;
- ar = ath12k_ah_to_ar(ah);
-
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_wiphy(hw->wiphy);
- *tx_ant = ar->cfg_tx_chainmask;
- *rx_ant = ar->cfg_rx_chainmask;
+ for_each_ar(ah, ar, i) {
+ antennas_rx = max_t(u32, antennas_rx, ar->cfg_rx_chainmask);
+ antennas_tx = max_t(u32, antennas_tx, ar->cfg_tx_chainmask);
+ }
- mutex_unlock(&ar->conf_mutex);
+ *tx_ant = antennas_tx;
+ *rx_ant = antennas_rx;
return 0;
}
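
Since one wiphy can now span several radios, get_antenna() reports the per-radio maximum of the configured chainmasks rather than a single ar's value. A hedged sketch of that reduction, with made-up chainmasks:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rx_chainmask[2] = { 0x3, 0xf }; /* made-up per-radio masks */
            unsigned int rx_ant = 0;

            for (int i = 0; i < 2; i++)
                    if (rx_chainmask[i] > rx_ant)
                            rx_ant = rx_chainmask[i];

            printf("reported rx antenna mask: 0x%x\n", rx_ant); /* 0xf */
            return 0;
    }
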
@@ -6163,31 +8444,40 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- int ret;
+ int ret = 0;
+ int i;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
- mutex_lock(&ar->conf_mutex);
- ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
- mutex_unlock(&ar->conf_mutex);
+ for_each_ar(ah, ar, i) {
+ ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
+ if (ret)
+ break;
+ }
return ret;
}
-static int ath12k_mac_ampdu_action(struct ath12k_vif *arvif,
- struct ieee80211_ampdu_params *params)
+static int ath12k_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
- struct ath12k *ar = arvif->ar;
+ struct ath12k *ar;
int ret = -EINVAL;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+ if (!ar)
+ return -EINVAL;
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
- ret = ath12k_dp_rx_ampdu_start(ar, params);
+ ret = ath12k_dp_rx_ampdu_start(ar, params, link_id);
break;
case IEEE80211_AMPDU_RX_STOP:
- ret = ath12k_dp_rx_ampdu_stop(ar, params);
+ ret = ath12k_dp_rx_ampdu_stop(ar, params, link_id);
break;
case IEEE80211_AMPDU_TX_START:
case IEEE80211_AMPDU_TX_STOP_CONT:
@@ -6201,6 +8491,10 @@ static int ath12k_mac_ampdu_action(struct ath12k_vif *arvif,
break;
}
+ if (ret)
+ ath12k_warn(ar->ab, "unable to perform ampdu action %d for vif %pM link %u ret %d\n",
+ params->action, vif->addr, link_id, ret);
+
return ret;
}
@@ -6208,40 +8502,44 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_sta *sta = params->sta;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ unsigned long links_map = ahsta->links_map;
int ret = -EINVAL;
+ u8 link_id;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
- mutex_lock(&ar->conf_mutex);
- ret = ath12k_mac_ampdu_action(arvif, params);
- mutex_unlock(&ar->conf_mutex);
+ if (WARN_ON(!links_map))
+ return ret;
- if (ret)
- ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n",
- ar->pdev_idx, params->action, ret);
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ ret = ath12k_mac_ampdu_action(hw, vif, params, link_id);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ if (!ar)
+ return -EINVAL;
+
ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx add freq %u width %d ptr %pK\n",
+ "mac chanctx add freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
- mutex_lock(&ar->conf_mutex);
-
spin_lock_bh(&ar->data_lock);
/* TODO: In case of multiple channel context, populate rx_channel from
* Rx PPDU desc information.
@@ -6249,35 +8547,33 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
ar->rx_channel = ctx->def.chan;
spin_unlock_bh(&ar->data_lock);
- mutex_unlock(&ar->conf_mutex);
-
return 0;
}
static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ if (!ar)
+ return;
+
ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx remove freq %u width %d ptr %pK\n",
+ "mac chanctx remove freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
- mutex_lock(&ar->conf_mutex);
-
spin_lock_bh(&ar->data_lock);
/* TODO: In case of there is one more channel context left, populate
* rx_channel with the channel of that remaining channel context.
*/
ar->rx_channel = NULL;
spin_unlock_bh(&ar->data_lock);
-
- mutex_unlock(&ar->conf_mutex);
}
static enum wmi_phy_mode
@@ -6286,14 +8582,24 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
enum nl80211_band band,
enum nl80211_iftype type)
{
- struct ieee80211_sta_eht_cap *eht_cap;
+ struct ieee80211_sta_eht_cap *eht_cap = NULL;
enum wmi_phy_mode down_mode;
+ int n = ar->mac.sbands[band].n_iftype_data;
+ int i;
+ struct ieee80211_sband_iftype_data *data;
if (mode < MODE_11BE_EHT20)
return mode;
- eht_cap = &ar->mac.iftype[band][type].eht_cap;
- if (eht_cap->has_eht)
+ data = ar->mac.iftype[band];
+ for (i = 0; i < n; i++) {
+ if (data[i].types_mask & BIT(type)) {
+ eht_cap = &data[i].eht_cap;
+ break;
+ }
+ }
+
+ if (eht_cap && eht_cap->has_eht)
return mode;
switch (mode) {
@@ -6333,8 +8639,60 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
return down_mode;
}
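
The downgrade check above no longer indexes a per-[band][type] capability table; it scans the sband's iftype_data array for an entry whose types_mask covers the interface type. A user-space sketch of that matching logic (the struct layout and values are assumptions for illustration only):

    #include <stdio.h>

    struct iftype_data { unsigned int types_mask; int has_eht; }; /* assumed shape */

    int main(void)
    {
            struct iftype_data data[] = {
                    { .types_mask = 1u << 2, .has_eht = 1 }, /* e.g. AP */
                    { .types_mask = 1u << 3, .has_eht = 0 }, /* e.g. STA */
            };
            unsigned int type = 3; /* interface type being brought up */
            int has_eht = 0;

            for (int i = 0; i < 2; i++) {
                    if (data[i].types_mask & (1u << type)) {
                            has_eht = data[i].has_eht;
                            break;
                    }
            }
            printf("EHT supported for iftype %u: %d\n", type, has_eht);
            return 0;
    }
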
+static void
+ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif,
+ struct wmi_ml_arg *ml_arg)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct wmi_ml_partner_info *partner_info;
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k_link_vif *arvif_p;
+ unsigned long links;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+ if (!ath12k_mac_is_ml_arvif(arvif))
+ return;
+
+ if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS)
+ return;
+
+ ml_arg->enabled = true;
+
+ /* The driver always adds a new link via VDEV START; the FW takes
+ * care of internally adding this link to the existing link vdevs
+ * which are advertised as partners below.
+ */
+ ml_arg->link_add = true;
+ partner_info = ml_arg->partner_info;
+
+ links = ahvif->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif_p = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+
+ if (WARN_ON(!arvif_p))
+ continue;
+
+ if (arvif == arvif_p)
+ continue;
+
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ ahvif->vif->link_conf[arvif_p->link_id]);
+
+ if (!link_conf)
+ continue;
+
+ partner_info->vdev_id = arvif_p->vdev_id;
+ partner_info->hw_link_id = arvif_p->ar->pdev->hw_link_id;
+ ether_addr_copy(partner_info->addr, link_conf->addr);
+ ml_arg->num_partner_links++;
+ partner_info++;
+ }
+}
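
A minimal sketch of how the partner list above is built: every set bit in the MLD's link bitmap names an active link, and each link other than the one being started is advertised as a partner. The bitmap value below is made up:

    #include <stdio.h>

    int main(void)
    {
            unsigned long links_map = 0x05; /* made up: links 0 and 2 active */
            int starting_link = 0;          /* the link VDEV START is creating */
            int partners = 0;

            for (int link_id = 0; link_id < 15; link_id++) {
                    if (!(links_map & (1UL << link_id)))
                            continue;
                    if (link_id == starting_link)
                            continue;
                    printf("advertise link %d as ML partner\n", link_id);
                    partners++;
            }
            printf("num_partner_links = %d\n", partners);
            return 0;
    }
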
+
static int
-ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
+ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx,
bool restart)
{
@@ -6342,10 +8700,20 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
struct ath12k_base *ab = ar->ab;
struct wmi_vdev_start_req_arg arg = {};
const struct cfg80211_chan_def *chandef = &ctx->def;
- int he_support = arvif->vif->bss_conf.he_support;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int dfs_cac_time;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in vdev start for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
reinit_completion(&ar->vdev_setup_done);
@@ -6361,24 +8729,30 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
arg.mode = ath12k_mac_check_down_grade_phy_mode(ar, arg.mode,
chandef->chan->band,
- arvif->vif->type);
+ ahvif->vif->type);
arg.min_power = 0;
- arg.max_power = chandef->chan->max_power * 2;
- arg.max_reg_power = chandef->chan->max_reg_power * 2;
- arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+ arg.max_power = chandef->chan->max_power;
+ arg.max_reg_power = chandef->chan->max_reg_power;
+ arg.max_antenna_gain = chandef->chan->max_antenna_gain;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
- /* Fill the MBSSID flags to indicate AP is non MBSSID by default
- * Corresponding flags would be updated with MBSSID support.
- */
arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+ arg.mbssid_tx_vdev_id = 0;
+ if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath12k_mac_setup_vdev_params_mbssid(arvif,
+ &arg.mbssid_flags,
+ &arg.mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- arg.ssid = arvif->u.ap.ssid;
- arg.ssid_len = arvif->u.ap.ssid_len;
- arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = ahvif->u.ap.ssid;
+ arg.ssid_len = ahvif->u.ap.ssid_len;
+ arg.hidden_ssid = ahvif->u.ap.hidden_ssid;
/* For now allow DFS for AP mode */
arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
@@ -6392,7 +8766,7 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
spin_unlock_bh(&ab->base_lock);
/* TODO: Notify if secondary 80Mhz also needs radar detection */
- if (he_support) {
+ if (link_conf->he_support) {
ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
if (ret) {
ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
@@ -6404,6 +8778,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+ if (!restart)
+ ath12k_mac_mlo_get_vdev_args(arvif, &arg.ml);
+
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n",
arg.vdev_id, arg.freq,
@@ -6425,22 +8802,22 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
ar->num_started_vdevs++;
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
- arvif->vif->addr, arvif->vdev_id);
+ ahvif->vif->addr, arvif->vdev_id);
- /* Enable CAC Flag in the driver by checking the channel DFS cac time,
- * i.e dfs_cac_ms value which will be valid only for radar channels
- * and state as NL80211_DFS_USABLE which indicates CAC needs to be
- * done before channel usage. This flags is used to drop rx packets.
+ /* Enable the CAC Running flag in the driver by checking whether all
+ * sub-channels' DFS state is NL80211_DFS_USABLE, which indicates CAC
+ * needs to be done before channel usage. This flag is used to drop rx packets
* during CAC.
*/
/* TODO: Set the flag for other interface types as required */
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
- chandef->chan->dfs_cac_ms &&
- chandef->chan->dfs_state == NL80211_DFS_USABLE) {
- set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled &&
+ cfg80211_chandef_dfs_usable(hw->wiphy, chandef)) {
+ set_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
+ dfs_cac_time = cfg80211_chandef_dfs_cac_time(hw->wiphy, chandef);
+
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "CAC Started in chan_freq %d for vdev %d\n",
- arg.freq, arg.vdev_id);
+ "CAC started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n",
+ dfs_cac_time, arg.freq, arg.band_center_freq1, arg.vdev_id);
}
ret = ath12k_mac_set_txbf_conf(arvif);
@@ -6451,13 +8828,13 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
return 0;
}
-static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
+static int ath12k_mac_vdev_start(struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
return ath12k_mac_vdev_start_restart(arvif, ctx, false);
}
-static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif,
+static int ath12k_mac_vdev_restart(struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
return ath12k_mac_vdev_start_restart(arvif, ctx, true);
@@ -6468,38 +8845,135 @@ struct ath12k_mac_change_chanctx_arg {
struct ieee80211_vif_chanctx_switch *vifs;
int n_vifs;
int next_vif;
+ struct ath12k *ar;
};
static void
ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ u8 link_id;
- if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
- return;
+ lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+ if (WARN_ON(!arvif))
+ continue;
+
+ if (arvif->ar != arg->ar)
+ continue;
+
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ vif->link_conf[link_id]);
+ if (WARN_ON(!link_conf))
+ continue;
- arg->n_vifs++;
+ if (rcu_access_pointer(link_conf->chanctx_conf) != arg->ctx)
+ continue;
+
+ arg->n_vifs++;
+ }
}
static void
ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_bss_conf *link_conf;
struct ieee80211_chanctx_conf *ctx;
+ struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ u8 link_id;
- ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
- if (ctx != arg->ctx)
- return;
+ lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
- if (WARN_ON(arg->next_vif == arg->n_vifs))
- return;
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+ if (WARN_ON(!arvif))
+ continue;
+
+ if (arvif->ar != arg->ar)
+ continue;
- arg->vifs[arg->next_vif].vif = vif;
- arg->vifs[arg->next_vif].old_ctx = ctx;
- arg->vifs[arg->next_vif].new_ctx = ctx;
- arg->next_vif++;
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ vif->link_conf[arvif->link_id]);
+ if (WARN_ON(!link_conf))
+ continue;
+
+ ctx = rcu_access_pointer(link_conf->chanctx_conf);
+ if (ctx != arg->ctx)
+ continue;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
+
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->vifs[arg->next_vif].link_conf = link_conf;
+ arg->next_vif++;
+ }
+}
+
+static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width)
+{
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20:
+ return WMI_CHAN_WIDTH_20;
+ case NL80211_CHAN_WIDTH_40:
+ return WMI_CHAN_WIDTH_40;
+ case NL80211_CHAN_WIDTH_80:
+ return WMI_CHAN_WIDTH_80;
+ case NL80211_CHAN_WIDTH_160:
+ return WMI_CHAN_WIDTH_160;
+ case NL80211_CHAN_WIDTH_80P80:
+ return WMI_CHAN_WIDTH_80P80;
+ case NL80211_CHAN_WIDTH_5:
+ return WMI_CHAN_WIDTH_5;
+ case NL80211_CHAN_WIDTH_10:
+ return WMI_CHAN_WIDTH_10;
+ case NL80211_CHAN_WIDTH_320:
+ return WMI_CHAN_WIDTH_320;
+ default:
+ WARN_ON(1);
+ return WMI_CHAN_WIDTH_20;
+ }
+}
+
+static int ath12k_mac_update_peer_puncturing_width(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct cfg80211_chan_def def)
+{
+ u32 param_id, param_value;
+ int ret;
+
+ if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ param_id = WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP;
+ param_value = ath12k_mac_nlwidth_to_wmiwidth(def.width) |
+ u32_encode_bits((~def.punctured),
+ WMI_PEER_PUNCTURE_BITMAP);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "punctured bitmap %02x width %d vdev %d\n",
+ def.punctured, def.width, arvif->vdev_id);
+
+ ret = ath12k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id, param_id,
+ param_value);
+
+ return ret;
}
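
Worked example of the parameter encoding above: the chandef carries the bitmap of *punctured* 20 MHz sub-channels, while the firmware wants the usable ones, hence the inversion before the bitmap is packed next to the width. The field position and mask below are placeholders, not the real WMI_PEER_PUNCTURE_BITMAP definition from wmi.h:

    #include <stdint.h>
    #include <stdio.h>

    #define WMI_CHAN_WIDTH_80 2   /* width value from the helper above */
    #define PUNCT_SHIFT       16  /* assumed position of the puncture field */
    #define PUNCT_MASK        0xffff

    int main(void)
    {
            uint16_t punctured = 0x0002; /* second 20 MHz sub-channel punctured */
            uint32_t param_value;

            /* firmware wants the usable sub-channel bitmap, hence the ~ */
            param_value = WMI_CHAN_WIDTH_80 |
                          (((uint32_t)~punctured & PUNCT_MASK) << PUNCT_SHIFT);

            printf("param_value = 0x%08x\n", (unsigned int)param_value);
            return 0;
    }
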
static void
@@ -6507,18 +8981,28 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs)
{
+ struct ath12k_wmi_vdev_up_params params = {};
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *arvif;
+ struct ath12k_link_vif *arvif;
+ struct ieee80211_vif *vif;
+ struct ath12k_vif *ahvif;
+ u8 link_id;
int ret;
int i;
bool monitor_vif = false;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
for (i = 0; i < n_vifs; i++) {
- arvif = ath12k_vif_to_arvif(vifs[i].vif);
-
- if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
+ vif = vifs[i].vif;
+ ahvif = ath12k_vif_to_ahvif(vif);
+ link_conf = vifs[i].link_conf;
+ link_id = link_conf->link_id;
+ arvif = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahvif->link[link_id]);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
monitor_vif = true;
ath12k_dbg(ab, ATH12K_DBG_MAC,
@@ -6532,29 +9016,6 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
if (WARN_ON(!arvif->is_started))
continue;
- if (WARN_ON(!arvif->is_up))
- continue;
-
- ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
- if (ret) {
- ath12k_warn(ab, "failed to down vdev %d: %d\n",
- arvif->vdev_id, ret);
- continue;
- }
- }
-
- /* All relevant vdevs are downed and associated channel resources
- * should be available for the channel switch now.
- */
-
- /* TODO: Update ar->rx_channel */
-
- for (i = 0; i < n_vifs; i++) {
- arvif = ath12k_vif_to_arvif(vifs[i].vif);
-
- if (WARN_ON(!arvif->is_started))
- continue;
-
arvif->punct_bitmap = vifs[i].new_ctx->def.punctured;
/* Firmware expect vdev_restart only if vdev is up.
@@ -6587,13 +9048,35 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
ret);
- ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
- arvif->bssid);
+ memset(&params, 0, sizeof(params));
+ params.vdev_id = arvif->vdev_id;
+ params.aid = ahvif->aid;
+ params.bssid = arvif->bssid;
+ if (vif->mbssid_tx_vif) {
+ struct ath12k_vif *tx_ahvif =
+ ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
+ struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
+
+ params.tx_bssid = tx_arvif->bssid;
+ params.nontx_profile_idx = link_conf->bssid_index;
+ params.nontx_profile_cnt = 1 << link_conf->bssid_indicator;
+ }
+ ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
ath12k_warn(ab, "failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
continue;
}
+
+ ret = ath12k_mac_update_peer_puncturing_width(arvif->ar, arvif,
+ vifs[i].new_ctx->def);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to update puncturing bitmap %02x and width %d: %d\n",
+ vifs[i].new_ctx->def.punctured,
+ vifs[i].new_ctx->def.width, ret);
+ continue;
+ }
}
/* Restart the internal monitor vdev on new channel */
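
One detail worth calling out from the vdev-up parameters above: the beacon's MBSSID "indicator" is the log2 of the maximum number of BSSIDs in the set, which is why the non-transmitting profile count is derived as 1 << bssid_indicator. A tiny sketch with a made-up indicator value:

    #include <stdio.h>

    int main(void)
    {
            unsigned int bssid_indicator = 3; /* made-up beacon value */
            unsigned int profile_cnt = 1u << bssid_indicator;

            printf("MBSSID set size: %u profiles\n", profile_cnt); /* 8 */
            return 0;
    }
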
@@ -6607,10 +9090,10 @@ static void
ath12k_mac_update_active_vif_chan(struct ath12k *ar,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
+ struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx, .ar = ar };
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ieee80211_iterate_active_interfaces_atomic(hw,
IEEE80211_IFACE_ITER_NORMAL,
@@ -6637,24 +9120,26 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ if (!ar)
+ return;
- mutex_lock(&ar->conf_mutex);
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx change freq %u width %d ptr %pK changed %x\n",
+ "mac chanctx change freq %u width %d ptr %p changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
* switch_vif_chanctx().
*/
if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
- goto unlock;
+ return;
if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
changed & IEEE80211_CHANCTX_CHANGE_RADAR ||
@@ -6662,16 +9147,14 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
ath12k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
-
-unlock:
- mutex_unlock(&ar->conf_mutex);
}
static int ath12k_start_vdev_delay(struct ath12k *ar,
- struct ath12k_vif *arvif)
+ struct ath12k_link_vif *arvif)
{
struct ath12k_base *ab = ar->ab;
- struct ieee80211_vif *vif = arvif->vif;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
int ret;
if (WARN_ON(arvif->is_started))
@@ -6685,7 +9168,7 @@ static int ath12k_start_vdev_delay(struct ath12k *ar,
return ret;
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id);
if (ret) {
ath12k_warn(ab, "failed put monitor up: %d\n", ret);
@@ -6708,25 +9191,41 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ u8 link_id = link_conf->link_id;
+ struct ath12k_link_vif *arvif;
int ret;
- struct ath12k_wmi_peer_create_arg param;
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
+ lockdep_assert_wiphy(hw->wiphy);
- mutex_lock(&ar->conf_mutex);
+ /* For a multi radio wiphy, the vdev was not created during add_interface();
+ * create it now, since we now have a channel ctx to assign it to a
+ * specific ar/fw.
+ */
+ arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+ if (!arvif) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
+ if (!ar) {
+ ath12k_warn(arvif->ar->ab, "failed to assign chanctx for vif %pM link id %u, link vif is already started\n",
+ vif->addr, link_id);
+ return -EINVAL;
+ }
+
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx assign ptr %pK vdev_id %i\n",
+ "mac chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
arvif->punct_bitmap = ctx->def.punctured;
/* for some targets bss peer must be created before vdev_start */
if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ ahvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
!ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) {
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
@@ -6738,25 +9237,11 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
- param.vdev_id = arvif->vdev_id;
- param.peer_type = WMI_PEER_TYPE_DEFAULT;
- param.peer_addr = ar->mac_addr;
-
- ret = ath12k_peer_create(ar, arvif, NULL, &param);
- if (ret) {
- ath12k_warn(ab, "failed to create peer after vdev start delay: %d",
- ret);
- goto out;
- }
- }
-
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_start(ar);
if (ret)
goto out;
+
arvif->is_started = true;
goto out;
}
@@ -6769,7 +9254,7 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created)
+ if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created)
ath12k_mac_monitor_start(ar);
arvif->is_started = true;
@@ -6777,8 +9262,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
/* TODO: Setup ps and cts/rts protection */
out:
- mutex_unlock(&ar->conf_mutex);
-
return ret;
}
@@ -6788,39 +9271,46 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
+ u8 link_id = link_conf->link_id;
int ret;
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
+ lockdep_assert_wiphy(hw->wiphy);
- mutex_lock(&ar->conf_mutex);
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+
+ /* The vif is expected to be attached to an ar's VDEV.
+ * We leave the vif/vdev in this function as is and do not
+ * delete the vdev here (unassign is intentionally not
+ * symmetric with assign_vif_chanctx()); the VDEV will be
+ * deleted and unassigned either during remove_interface()
+ * or when a channel change moves the vif to a new ar.
+ */
+ if (!arvif || !arvif->is_created)
+ return;
+
+ ar = arvif->ar;
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx unassign ptr %pK vdev_id %i\n",
+ "mac chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
- ath12k_peer_find_by_addr(ab, ar->mac_addr))
- ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
-
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_stop(ar);
- if (ret) {
- mutex_unlock(&ar->conf_mutex);
+ if (ret)
return;
- }
arvif->is_started = false;
}
- if (arvif->vdev_type != WMI_VDEV_TYPE_STA) {
+ if (ahvif->vdev_type != WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
ath12k_bss_disassoc(ar, arvif);
ret = ath12k_mac_vdev_stop(arvif);
if (ret)
@@ -6829,15 +9319,12 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
arvif->is_started = false;
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
- ath12k_wmi_vdev_down(ar, arvif->vdev_id);
-
- if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
ath12k_mac_monitor_stop(ar);
- mutex_unlock(&ar->conf_mutex);
+ ath12k_mac_remove_link_interface(hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
}
static int
@@ -6846,30 +9333,34 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
- mutex_lock(&ar->conf_mutex);
+ ar = ath12k_get_ar_by_ctx(hw, vifs->old_ctx);
+ if (!ar)
+ return -EINVAL;
+
+ /* Switching channels across radio is not allowed */
+ if (ar != ath12k_get_ar_by_ctx(hw, vifs->new_ctx))
+ return -EINVAL;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac chanctx switch n_vifs %d mode %d\n",
n_vifs, mode);
ath12k_mac_update_vif_chan(ar, vifs, n_vifs);
- mutex_unlock(&ar->conf_mutex);
-
return 0;
}
static int
ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
{
- struct ath12k_vif *arvif;
+ struct ath12k_link_vif *arvif;
int ret = 0;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
list_for_each_entry(arvif, &ar->arvifs, list) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
param, arvif->vdev_id, value);
@@ -6882,7 +9373,7 @@ ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
break;
}
}
- mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -6893,11 +9384,23 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
- ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ /* Currently we set the RTS threshold value on all the vifs across
+ * all radios of the single wiphy.
+ * TODO: Once support for a vif specific RTS threshold is available
+ * in mac80211, ath12k can make use of it.
+ */
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set RTS config for all vdevs of pdev %d",
+ ar->pdev->pdev_id);
+ break;
+ }
+ }
return ret;
}
@@ -6914,37 +9417,83 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
* supported. This effectively prevents mac80211 from doing frame
* fragmentation in software.
*/
+
+ lockdep_assert_wiphy(hw->wiphy);
+
return -EOPNOTSUPP;
}
-static void ath12k_mac_flush(struct ath12k *ar)
+static int ath12k_mac_flush(struct ath12k *ar)
{
long time_left;
+ int ret = 0;
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH12K_FLUSH_TIMEOUT);
- if (time_left == 0)
- ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+ if (time_left == 0) {
+ ath12k_warn(ar->ab,
+ "failed to flush transmit queue, data pkts pending %d\n",
+ atomic_read(&ar->dp.num_tx_pending));
+ ret = -ETIMEDOUT;
+ }
time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
(atomic_read(&ar->num_pending_mgmt_tx) == 0),
ATH12K_FLUSH_TIMEOUT);
- if (time_left == 0)
- ath12k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n",
- time_left);
+ if (time_left == 0) {
+ ath12k_warn(ar->ab,
+ "failed to flush mgmt transmit queue, mgmt pkts pending %d\n",
+ atomic_read(&ar->num_pending_mgmt_tx));
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+int ath12k_mac_wait_tx_complete(struct ath12k *ar)
+{
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ ath12k_mac_drain_tx(ar);
+ return ath12k_mac_flush(ar);
}
static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_vif *ahvif;
+ unsigned long links;
+ struct ath12k *ar;
+ u8 link_id;
+ int i;
+
+ lockdep_assert_wiphy(hw->wiphy);
if (drop)
return;
- ath12k_mac_flush(ar);
+ /* vif can be NULL when flush() is requested for the whole hw */
+ if (!vif) {
+ for_each_ar(ah, ar, i)
+ ath12k_mac_flush(ar);
+ return;
+ }
+
+ for_each_ar(ah, ar, i)
+ wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
+
+ ahvif = ath12k_vif_to_ahvif(vif);
+ links = ahvif->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!(arvif && arvif->ar))
+ continue;
+
+ ath12k_mac_flush(arvif->ar);
+ }
}
static int
@@ -7062,14 +9611,14 @@ ath12k_mac_get_single_legacy_rate(struct ath12k *ar,
return 0;
}
-static int ath12k_mac_set_fixed_rate_params(struct ath12k_vif *arvif,
+static int ath12k_mac_set_fixed_rate_params(struct ath12k_link_vif *arvif,
u32 rate, u8 nss, u8 sgi, u8 ldpc)
{
struct ath12k *ar = arvif->ar;
u32 vdev_param;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
arvif->vdev_id, rate, nss, sgi);
@@ -7141,32 +9690,47 @@ ath12k_mac_vht_mcs_range_present(struct ath12k *ar,
static void ath12k_mac_set_bitrate_mask_iter(void *data,
struct ieee80211_sta *sta)
{
- struct ath12k_vif *arvif = data;
- struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
+ struct ath12k_link_vif *arvif = data;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_link_sta *arsta;
struct ath12k *ar = arvif->ar;
+ arsta = rcu_dereference(ahsta->link[arvif->link_id]);
+ if (!arsta || arsta->arvif != arvif)
+ return;
+
spin_lock_bh(&ar->data_lock);
arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
spin_unlock_bh(&ar->data_lock);
- ieee80211_queue_work(ath12k_ar_to_hw(ar), &arsta->update_wk);
+ wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &arsta->update_wk);
}
static void ath12k_mac_disable_peer_fixed_rate(void *data,
struct ieee80211_sta *sta)
{
- struct ath12k_vif *arvif = data;
+ struct ath12k_link_vif *arvif = data;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_link_sta *arsta;
struct ath12k *ar = arvif->ar;
int ret;
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[arvif->link_id]);
+
+ if (!arsta || arsta->arvif != arvif)
+ return;
+
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
WMI_FIXED_RATE_NONE);
if (ret)
ath12k_warn(ar->ab,
"failed to disable peer fixed rate for STA %pM ret %d\n",
- sta->addr, ret);
+ arsta->addr, ret);
}
static int
@@ -7174,9 +9738,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
struct cfg80211_chan_def def;
- struct ath12k *ar = arvif->ar;
+ struct ath12k *ar;
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
@@ -7188,8 +9753,15 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
int ret;
int num_rates;
- if (ath12k_mac_vif_chan(vif, &def))
- return -EPERM;
+ lockdep_assert_wiphy(hw->wiphy);
+
+ arvif = &ahvif->deflink;
+
+ ar = arvif->ar;
+ if (ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) {
+ ret = -EPERM;
+ goto out;
+ }
band = def.chan->band;
ht_mcs_mask = mask->control[band].ht_mcs;
@@ -7218,9 +9790,9 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
goto out;
}
- ieee80211_iterate_stations_atomic(hw,
- ath12k_mac_disable_peer_fixed_rate,
- arvif);
+ ieee80211_iterate_stations_mtx(hw,
+ ath12k_mac_disable_peer_fixed_rate,
+ arvif);
} else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
&single_nss)) {
rate = WMI_FIXED_RATE_NONE;
@@ -7261,34 +9833,25 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
*/
ath12k_warn(ar->ab,
"Setting more than one MCS Value in bitrate mask not supported\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- ieee80211_iterate_stations_atomic(hw,
- ath12k_mac_disable_peer_fixed_rate,
- arvif);
-
- mutex_lock(&ar->conf_mutex);
+ ieee80211_iterate_stations_mtx(hw,
+ ath12k_mac_disable_peer_fixed_rate,
+ arvif);
arvif->bitrate_mask = *mask;
- ieee80211_iterate_stations_atomic(hw,
- ath12k_mac_set_bitrate_mask_iter,
- arvif);
-
- mutex_unlock(&ar->conf_mutex);
+ ieee80211_iterate_stations_mtx(hw,
+ ath12k_mac_set_bitrate_mask_iter,
+ arvif);
}
- mutex_lock(&ar->conf_mutex);
-
ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
if (ret) {
ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
arvif->vdev_id, ret);
}
- mutex_unlock(&ar->conf_mutex);
-
out:
return ret;
}
@@ -7300,27 +9863,35 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- struct ath12k_vif *arvif;
- int recovery_count;
+ struct ath12k_vif *ahvif;
+ struct ath12k_link_vif *arvif;
+ int recovery_count, i;
+
+ lockdep_assert_wiphy(hw->wiphy);
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
+ guard(mutex)(&ah->hw_mutex);
- mutex_lock(&ar->conf_mutex);
+ if (ah->state != ATH12K_HW_STATE_RESTARTED)
+ return;
+
+ ah->state = ATH12K_HW_STATE_ON;
+ ieee80211_wake_queues(hw);
+
+ for_each_ar(ah, ar, i) {
+ ab = ar->ab;
- if (ar->state == ATH12K_STATE_RESTARTED) {
ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
ar->pdev->pdev_id);
- ar->state = ATH12K_STATE_ON;
- ieee80211_wake_queues(hw);
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
+
ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n",
recovery_count);
+
/* When there are multiple radios in an SOC,
* the recovery has to be done for each radio
*/
@@ -7334,27 +9905,28 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
}
list_for_each_entry(arvif, &ar->arvifs, list) {
+ ahvif = arvif->ahvif;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"reconfig cipher %d up %d vdev type %d\n",
- arvif->key_cipher,
+ ahvif->key_cipher,
arvif->is_up,
- arvif->vdev_type);
+ ahvif->vdev_type);
+
/* After trigger disconnect, then upper layer will
* trigger connect again, then the PN number of
* upper layer will be reset to keep up with AP
* side, hence PN number mismatch will not happen.
*/
if (arvif->is_up &&
- arvif->vdev_type == WMI_VDEV_TYPE_STA &&
- arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) {
- ieee80211_hw_restart_disconnect(arvif->vif);
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) {
+ ieee80211_hw_restart_disconnect(ahvif->vif);
+
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"restart disconnect\n");
}
}
}
-
- mutex_unlock(&ar->conf_mutex);
}
static void
@@ -7364,7 +9936,7 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
int ret;
enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
ar->rx_channel != channel)
@@ -7392,21 +9964,15 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ieee80211_supported_band *sband;
struct survey_info *ar_survey;
- int ret = 0;
+
+ lockdep_assert_wiphy(hw->wiphy);
if (idx >= ATH12K_NUM_CHANS)
return -ENOENT;
- ar = ath12k_ah_to_ar(ah);
-
- ar_survey = &ar->survey[idx];
-
- mutex_lock(&ar->conf_mutex);
-
sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
@@ -7415,12 +9981,28 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
- if (!sband || idx >= sband->n_channels) {
- ret = -ENOENT;
- goto exit;
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
+
+ if (!sband || idx >= sband->n_channels)
+ return -ENOENT;
+
+ ar = ath12k_mac_get_ar_by_chan(hw, &sband->channels[idx]);
+ if (!ar) {
+ if (sband->channels[idx].flags & IEEE80211_CHAN_DISABLED) {
+ memset(survey, 0, sizeof(*survey));
+ return 0;
+ }
+ return -ENOENT;
}
+ ar_survey = &ar->survey[idx];
+
ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
spin_lock_bh(&ar->data_lock);
@@ -7432,10 +10014,7 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (ar->rx_channel == survey->channel)
survey->filled |= SURVEY_INFO_IN_USE;
-exit:
- mutex_unlock(&ar->conf_mutex);
-
- return ret;
+ return 0;
}
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
@@ -7443,7 +10022,12 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
- struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_link_sta *arsta;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ arsta = &ahsta->deflink;
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@@ -7478,9 +10062,9 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_wiphy(hw->wiphy);
spin_lock_bh(&ar->data_lock);
ar->scan.roc_notify = false;
@@ -7488,9 +10072,8 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
ath12k_scan_abort(ar);
- mutex_unlock(&ar->conf_mutex);
-
cancel_delayed_work_sync(&ar->scan.timeout);
+ wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
return 0;
}
@@ -7501,16 +10084,63 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
int duration,
enum ieee80211_roc_type type)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k_wmi_scan_req_arg arg;
+ struct ath12k_link_vif *arvif;
struct ath12k *ar;
u32 scan_time_msec;
+ bool create = true;
+ u8 link_id;
int ret;
- ar = ath12k_ah_to_ar(ah);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq);
+ if (!ar)
+ return -EINVAL;
+
+ /* Check if any of the links of the ML VIF is already started on the
+ * radio (ar) corresponding to the given scan frequency and use it;
+ * if not, use the deflink (link 0) for scan purposes.
+ */
+
+ link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar);
+ arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+ /* If the vif is already assigned to a specific vdev of an ar,
+ * check whether it is already started; a vdev which is started
+ * is not allowed to switch to a new radio.
+ * If the vdev is not started, but was earlier created on a
+ * different ar, delete that vdev and create a new one. We don't
+ * delete at scan stop, as an optimization to avoid redundant
+ * delete-create vdev cycles for the same ar, in case requests
+ * for the vif always stay on the same band.
+ */
+ if (arvif->is_created) {
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
+
+ if (ar != arvif->ar && arvif->is_started)
+ return -EBUSY;
+
+ if (ar != arvif->ar) {
+ ath12k_mac_remove_link_interface(hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
+ } else {
+ create = false;
+ }
+ }
+
+ if (create) {
+ arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+
+ ret = ath12k_mac_vdev_create(ar, arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n",
+ ret);
+ return ret;
+ }
+ }
- mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
@@ -7520,7 +10150,7 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
reinit_completion(&ar->scan.on_channel);
ar->scan.state = ATH12K_SCAN_STARTING;
ar->scan.is_roc = true;
- ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.arvif = arvif;
ar->scan.roc_freq = chan->center_freq;
ar->scan.roc_notify = true;
ret = 0;
@@ -7535,37 +10165,41 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
if (ret)
- goto exit;
+ return ret;
scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2;
- memset(&arg, 0, sizeof(arg));
- ath12k_wmi_start_scan_init(ar, &arg);
- arg.num_chan = 1;
- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
- GFP_KERNEL);
- if (!arg.chan_list) {
- ret = -ENOMEM;
- goto exit;
- }
+ struct ath12k_wmi_scan_req_arg *arg __free(kfree) =
+ kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH12K_SCAN_ID;
- arg.chan_list[0] = chan->center_freq;
- arg.dwell_time_active = scan_time_msec;
- arg.dwell_time_passive = scan_time_msec;
- arg.max_scan_time = scan_time_msec;
- arg.scan_f_passive = 1;
- arg.burst_duration = duration;
-
- ret = ath12k_start_scan(ar, &arg);
+ ath12k_wmi_start_scan_init(ar, arg);
+ arg->num_chan = 1;
+
+ u32 *chan_list __free(kfree) = kcalloc(arg->num_chan, sizeof(*chan_list),
+ GFP_KERNEL);
+ if (!chan_list)
+ return -ENOMEM;
+
+ arg->chan_list = chan_list;
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH12K_SCAN_ID;
+ arg->chan_list[0] = chan->center_freq;
+ arg->dwell_time_active = scan_time_msec;
+ arg->dwell_time_passive = scan_time_msec;
+ arg->max_scan_time = scan_time_msec;
+ arg->scan_f_passive = 1;
+ arg->burst_duration = duration;
+
+ ret = ath12k_start_scan(ar, arg);
if (ret) {
ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH12K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
- goto free_chan_list;
+ return ret;
}
ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
@@ -7574,21 +10208,49 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
ret = ath12k_scan_stop(ar);
if (ret)
ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
- ret = -ETIMEDOUT;
- goto free_chan_list;
+ return -ETIMEDOUT;
}
ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
msecs_to_jiffies(duration));
- ret = 0;
+ return 0;
+}
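
The ROC path above relies on scope-based cleanup: the __free(kfree) annotation (from linux/cleanup.h) runs kfree() automatically when the variable leaves scope, which is why the old free_chan_list/exit unwind labels disappear. A minimal kernel-style sketch of the pattern, using a hypothetical stand-in type:

    #include <linux/cleanup.h>
    #include <linux/slab.h>

    struct roc_scan_arg { u32 vdev_id; }; /* hypothetical stand-in type */

    static int roc_scan_alloc_example(void)
    {
            struct roc_scan_arg *arg __free(kfree) =
                    kzalloc(sizeof(*arg), GFP_KERNEL);
            if (!arg)
                    return -ENOMEM;

            arg->vdev_id = 0;
            /* every return path from here on frees arg automatically */
            return 0;
    }

If the allocation had to outlive the scope, ownership would be handed back out with no_free_ptr() instead of returning the annotated pointer directly.
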
-free_chan_list:
- kfree(arg.chan_list);
-exit:
- mutex_unlock(&ar->conf_mutex);
+static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_rekey_data *rekey_data;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ struct ath12k_link_vif *arvif;
- return ret;
+ lockdep_assert_wiphy(hw->wiphy);
+
+ arvif = &ahvif->deflink;
+ rekey_data = &arvif->rekey_data;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set rekey data vdev %d\n",
+ arvif->vdev_id);
+
+ memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN);
+ memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN);
+
+ /* The supplicant provides the replay counter in big endian, while
+ * the firmware expects it in little endian.
+ */
+ rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr);
+
+ arvif->rekey_data.enable_offload = true;
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kck", NULL,
+ rekey_data->kck, NL80211_KCK_LEN);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kek", NULL,
+ rekey_data->kek, NL80211_KEK_LEN);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "replay ctr", NULL,
+ &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
}
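
To make the endianness comment above concrete, here is a user-space sketch of what get_unaligned_be64() computes from the supplicant's big endian replay counter (the byte values are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* user-space stand-in for the kernel's get_unaligned_be64() */
    static uint64_t get_be64(const uint8_t *p)
    {
            uint64_t v = 0;

            for (int i = 0; i < 8; i++)
                    v = (v << 8) | p[i];
            return v;
    }

    int main(void)
    {
            const uint8_t replay_ctr[8] = { 0, 0, 0, 0, 0, 0, 0x01, 0x02 };

            /* prints 0x102: byte 0 is the most significant in big endian */
            printf("host-order replay counter: 0x%llx\n",
                   (unsigned long long)get_be64(replay_ctr));
            return 0;
    }
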
static const struct ieee80211_ops ath12k_ops = {
@@ -7601,14 +10263,17 @@ static const struct ieee80211_ops ath12k_ops = {
.remove_interface = ath12k_mac_op_remove_interface,
.update_vif_offload = ath12k_mac_op_update_vif_offload,
.config = ath12k_mac_op_config,
- .bss_info_changed = ath12k_mac_op_bss_info_changed,
+ .link_info_changed = ath12k_mac_op_link_info_changed,
+ .vif_cfg_changed = ath12k_mac_op_vif_cfg_changed,
+ .change_vif_links = ath12k_mac_op_change_vif_links,
.configure_filter = ath12k_mac_op_configure_filter,
.hw_scan = ath12k_mac_op_hw_scan,
.cancel_hw_scan = ath12k_mac_op_cancel_hw_scan,
.set_key = ath12k_mac_op_set_key,
+ .set_rekey_data = ath12k_mac_op_set_rekey_data,
.sta_state = ath12k_mac_op_sta_state,
.sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
- .sta_rc_update = ath12k_mac_op_sta_rc_update,
+ .link_sta_rc_update = ath12k_mac_op_link_sta_rc_update,
.conf_tx = ath12k_mac_op_conf_tx,
.set_antenna = ath12k_mac_op_set_antenna,
.get_antenna = ath12k_mac_op_get_antenna,
@@ -7627,6 +10292,13 @@ static const struct ieee80211_ops ath12k_ops = {
.sta_statistics = ath12k_mac_op_sta_statistics,
.remain_on_channel = ath12k_mac_op_remain_on_channel,
.cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
+ .change_sta_links = ath12k_mac_op_change_sta_links,
+ .can_activate_links = ath12k_mac_op_can_activate_links,
+#ifdef CONFIG_PM
+ .suspend = ath12k_wow_op_suspend,
+ .resume = ath12k_wow_op_resume,
+ .set_wakeup = ath12k_wow_op_set_wakeup,
+#endif
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -7643,6 +10315,9 @@ static void ath12k_mac_update_ch_list(struct ath12k *ar,
band->channels[i].center_freq > freq_high)
band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
+
+ ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low);
+ ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high);
}
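
The wiphy radio freq_range fields are expressed in kHz while channel center frequencies are in MHz, hence the MHZ_TO_KHZ() conversion above. A trivial sketch (the 5 GHz bounds below are made up):

    #include <stdio.h>

    #define MHZ_TO_KHZ(freq) ((freq) * 1000) /* mirrors the cfg80211 macro */

    int main(void)
    {
            unsigned int freq_low = 5180, freq_high = 5885; /* MHz, made up */

            printf("radio tunable range: %u..%u kHz\n",
                   MHZ_TO_KHZ(freq_low), MHZ_TO_KHZ(freq_high));
            return 0;
    }
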
static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
@@ -7667,6 +10342,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
{
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+ struct ath12k_hw *ah = ar->ah;
void *channels;
u32 phy_id;
@@ -7721,6 +10397,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
+ ah->use_6ghz_regd = true;
}
if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) {
@@ -7757,10 +10434,12 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah)
{
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
u16 interface_modes = U16_MAX;
- interface_modes &= ar->ab->hw_params->interface_modes;
+ for_each_ar(ah, ar, i)
+ interface_modes &= ar->ab->hw_params->interface_modes;
return interface_modes == U16_MAX ? 0 : interface_modes;
}
@@ -7768,36 +10447,43 @@ static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah)
static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
enum nl80211_iftype type)
{
- struct ath12k *ar = ath12k_ah_to_ar(ah);
- u16 interface_modes, mode;
- bool is_enable = true;
+ struct ath12k *ar;
+ int i;
+ u16 interface_modes, mode = 0;
+ bool is_enable = false;
- mode = BIT(type);
+ if (type == NL80211_IFTYPE_MESH_POINT) {
+ if (IS_ENABLED(CONFIG_MAC80211_MESH))
+ mode = BIT(type);
+ } else {
+ mode = BIT(type);
+ }
- interface_modes = ar->ab->hw_params->interface_modes;
- if (!(interface_modes & mode))
- is_enable = false;
+ for_each_ar(ah, ar, i) {
+ interface_modes = ar->ab->hw_params->interface_modes;
+ if (interface_modes & mode) {
+ is_enable = true;
+ break;
+ }
+ }
return is_enable;
}
-static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+static int
+ath12k_mac_setup_radio_iface_comb(struct ath12k *ar,
+ struct ieee80211_iface_combination *comb)
{
- struct wiphy *wiphy = ah->hw->wiphy;
- struct ieee80211_iface_combination *combinations;
+ u16 interface_modes = ar->ab->hw_params->interface_modes;
struct ieee80211_iface_limit *limits;
int n_limits, max_interfaces;
bool ap, mesh, p2p;
- ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
- p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
+ ap = interface_modes & BIT(NL80211_IFTYPE_AP);
+ p2p = interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
-
- combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
- if (!combinations)
- return -ENOMEM;
+ (interface_modes & BIT(NL80211_IFTYPE_MESH_POINT));
if ((ap || mesh) && !p2p) {
n_limits = 2;
@@ -7814,10 +10500,8 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
}
limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
- if (!limits) {
- kfree(combinations);
+ if (!limits)
return -ENOMEM;
- }
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
@@ -7833,46 +10517,205 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
if (p2p) {
limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_P2P_GO);
limits[2].max = 1;
limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
}
- combinations[0].limits = limits;
- combinations[0].n_limits = n_limits;
- combinations[0].max_interfaces = max_interfaces;
- combinations[0].num_different_channels = 1;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
- combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80);
+ comb[0].limits = limits;
+ comb[0].n_limits = n_limits;
+ comb[0].max_interfaces = max_interfaces;
+ comb[0].num_different_channels = 1;
+ comb[0].beacon_int_infra_match = true;
+ comb[0].beacon_int_min_gcd = 100;
+ comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80);
+
+ return 0;
+}
+
+static int
+ath12k_mac_setup_global_iface_comb(struct ath12k_hw *ah,
+ struct wiphy_radio *radio,
+ u8 n_radio,
+ struct ieee80211_iface_combination *comb)
+{
+ const struct ieee80211_iface_combination *iter_comb;
+ struct ieee80211_iface_limit *limits;
+ int i, j, n_limits;
+ bool ap, mesh, p2p;
+
+ if (!n_radio)
+ return 0;
+
+ ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
+ p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
+ mesh = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
+
+ if ((ap || mesh) && !p2p)
+ n_limits = 2;
+ else if (p2p)
+ n_limits = 3;
+ else
+ n_limits = 1;
+
+ limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
+ if (!limits)
+ return -ENOMEM;
+
+ for (i = 0; i < n_radio; i++) {
+ iter_comb = radio[i].iface_combinations;
+ for (j = 0; j < iter_comb->n_limits && j < n_limits; j++) {
+ limits[j].types |= iter_comb->limits[j].types;
+ limits[j].max += iter_comb->limits[j].max;
+ }
+
+ comb->max_interfaces += iter_comb->max_interfaces;
+ comb->num_different_channels += iter_comb->num_different_channels;
+ comb->radar_detect_widths |= iter_comb->radar_detect_widths;
+ }
+ comb->limits = limits;
+ comb->n_limits = n_limits;
+ comb->beacon_int_infra_match = true;
+ comb->beacon_int_min_gcd = 100;
+
+ return 0;
+}
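
A sketch of the merge rule implemented above: for each limit slot, the global combination ORs the per-radio type bitmaps and sums the per-radio maxima (max_interfaces and channel counts accumulate the same way). The numbers below are illustrative only:

    #include <stdio.h>

    struct limit { unsigned int types; int max; }; /* simplified iface_limit */

    int main(void)
    {
            struct limit radio0 = { .types = 0x4, .max = 16 }; /* made-up AP slot */
            struct limit radio1 = { .types = 0x4, .max = 16 };
            struct limit global = { 0, 0 };

            global.types = radio0.types | radio1.types; /* union of iftypes */
            global.max = radio0.max + radio1.max;       /* capacity adds up */

            printf("global limit: types 0x%x max %d\n", global.types, global.max);
            return 0;
    }
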
+
+static
+void ath12k_mac_cleanup_iface_comb(const struct ieee80211_iface_combination *iface_comb)
+{
+ kfree(iface_comb[0].limits);
+ kfree(iface_comb);
+}
+
+static void ath12k_mac_cleanup_iface_combinations(struct ath12k_hw *ah)
+{
+ struct wiphy *wiphy = ah->hw->wiphy;
+ const struct wiphy_radio *radio;
+ int i;
+
+ if (wiphy->n_radio > 0) {
+ radio = wiphy->radio;
+ for (i = 0; i < wiphy->n_radio; i++)
+ ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations);
+
+ kfree(wiphy->radio);
+ }
+
+ ath12k_mac_cleanup_iface_comb(wiphy->iface_combinations);
+}
+
+static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+{
+ struct ieee80211_iface_combination *combinations, *comb;
+ struct wiphy *wiphy = ah->hw->wiphy;
+ struct wiphy_radio *radio;
+ struct ath12k *ar;
+ int i, ret;
+
+ combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
+ if (ah->num_radio == 1) {
+ ret = ath12k_mac_setup_radio_iface_comb(&ah->radio[0],
+ combinations);
+ if (ret) {
+ ath12k_hw_warn(ah, "failed to setup radio interface combinations for one radio: %d",
+ ret);
+ goto err_free_combinations;
+ }
+
+ goto out;
+ }
+
+ /* there are multiple radios */
+
+ radio = kcalloc(ah->num_radio, sizeof(*radio), GFP_KERNEL);
+ if (!radio) {
+ ret = -ENOMEM;
+ goto err_free_combinations;
+ }
+
+ for_each_ar(ah, ar, i) {
+ comb = kzalloc(sizeof(*comb), GFP_KERNEL);
+ if (!comb) {
+ ret = -ENOMEM;
+ goto err_free_radios;
+ }
+
+ ret = ath12k_mac_setup_radio_iface_comb(ar, comb);
+ if (ret) {
+ ath12k_hw_warn(ah, "failed to setup radio interface combinations for radio %d: %d",
+ i, ret);
+ kfree(comb);
+ goto err_free_radios;
+ }
+
+ radio[i].freq_range = &ar->freq_range;
+ radio[i].n_freq_range = 1;
+
+ radio[i].iface_combinations = comb;
+ radio[i].n_iface_combinations = 1;
+ }
+
+ ret = ath12k_mac_setup_global_iface_comb(ah, radio, ah->num_radio, combinations);
+ if (ret) {
+ ath12k_hw_warn(ah, "failed to setup global interface combinations: %d",
+ ret);
+ goto err_free_all_radios;
+ }
+
+ wiphy->radio = radio;
+ wiphy->n_radio = ah->num_radio;
+
+out:
wiphy->iface_combinations = combinations;
wiphy->n_iface_combinations = 1;
return 0;
+
+err_free_all_radios:
+ i = ah->num_radio;
+
+err_free_radios:
+ while (i--)
+ ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations);
+
+ kfree(radio);
+
+err_free_combinations:
+ kfree(combinations);
+
+ return ret;
}
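Note the two unwind entry points above: err_free_all_radios seeds i with the full radio count so every per-radio combination is freed, while a failure inside the allocation loop jumps to err_free_radios with i already at the first unpopulated slot. A standalone sketch of the same fall-through idiom, with hypothetical helpers:

#include <errno.h>
#include <stdlib.h>

struct item { int dummy; };	/* stand-in for a per-radio combination */

static int finalize(struct item **items, int n) { (void)items; (void)n; return 0; }

static int setup_all(struct item **items, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		items[i] = calloc(1, sizeof(*items[i]));
		if (!items[i]) {
			ret = -ENOMEM;
			goto err_unwind;	/* frees items[0..i-1] only */
		}
	}

	ret = finalize(items, n);
	if (ret)
		goto err_unwind_all;
	return 0;

err_unwind_all:
	i = n;			/* seed the loop below to free every item */
err_unwind:
	while (i--)
		free(items[i]);
	return ret;
}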
static const u8 ath12k_if_types_ext_capa[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};
static const u8 ath12k_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
static const u8 ath12k_if_types_ext_capa_ap[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+ [10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
};
-static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
+static struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
{
.extended_capabilities = ath12k_if_types_ext_capa,
.extended_capabilities_mask = ath12k_if_types_ext_capa,
@@ -7889,6 +10732,8 @@ static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
.extended_capabilities_mask = ath12k_if_types_ext_capa_ap,
.extended_capabilities_len =
sizeof(ath12k_if_types_ext_capa_ap),
+ .eml_capabilities = 0,
+ .mld_capa_and_ops = 0,
},
};
@@ -7905,17 +10750,20 @@ static void ath12k_mac_cleanup_unregister(struct ath12k *ar)
static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
{
struct ieee80211_hw *hw = ah->hw;
- struct wiphy *wiphy = hw->wiphy;
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
- cancel_work_sync(&ar->regd_update_work);
+ for_each_ar(ah, ar, i) {
+ cancel_work_sync(&ar->regd_update_work);
+ ath12k_debugfs_unregister(ar);
+ }
ieee80211_unregister_hw(hw);
- ath12k_mac_cleanup_unregister(ar);
+ for_each_ar(ah, ar, i)
+ ath12k_mac_cleanup_unregister(ar);
- kfree(wiphy->iface_combinations[0].limits);
- kfree(wiphy->iface_combinations);
+ ath12k_mac_cleanup_iface_combinations(ah);
SET_IEEE80211_DEV(hw, NULL);
}
@@ -7952,7 +10800,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
{
struct ieee80211_hw *hw = ah->hw;
struct wiphy *wiphy = hw->wiphy;
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev;
struct ath12k_pdev_cap *cap;
@@ -7967,39 +10815,77 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
WLAN_CIPHER_SUITE_GCMP_256,
WLAN_CIPHER_SUITE_CCMP_256,
};
- int ret;
- u32 ht_cap = 0;
+ int ret, i, j;
+ u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0;
+ bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false;
+ u8 *mac_addr = NULL;
+ u8 mbssid_max_interfaces = 0;
- pdev = ar->pdev;
+ wiphy->max_ap_assoc_sta = 0;
- if (ab->pdevs_macaddr_valid)
- ether_addr_copy(ar->mac_addr, pdev->mac_addr);
- else
- ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ for_each_ar(ah, ar, i) {
+ u32 ht_cap_info = 0;
- ret = ath12k_mac_setup_register(ar, &ht_cap, hw->wiphy->bands);
- if (ret)
- goto out;
+ pdev = ar->pdev;
+ if (ar->ab->pdevs_macaddr_valid) {
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ } else {
+ ether_addr_copy(ar->mac_addr, ar->ab->mac_addr);
+ ar->mac_addr[4] += ar->pdev_idx;
+ }
+
+ ret = ath12k_mac_setup_register(ar, &ht_cap_info, hw->wiphy->bands);
+ if (ret)
+ goto err_cleanup_unregister;
- wiphy->max_ap_assoc_sta = ar->max_num_stations;
+ /* 6 GHz does not support HT Cap, hence do not consider it */
+ if (!ar->supports_6ghz)
+ ht_cap &= ht_cap_info;
- cap = &pdev->cap;
+ wiphy->max_ap_assoc_sta += ar->max_num_stations;
- wiphy->available_antennas_rx = cap->rx_chain_mask;
- wiphy->available_antennas_tx = cap->tx_chain_mask;
+ /* Advertise the maximum antenna support across all radios; the driver
+ * can apply a per-pdev antenna setting based on each pdev's capability
+ * when antenna changes are made.
+ */
+ cap = &pdev->cap;
+
+ antennas_rx = max_t(u32, antennas_rx, cap->rx_chain_mask);
+ antennas_tx = max_t(u32, antennas_tx, cap->tx_chain_mask);
+
+ if (ar->supports_6ghz)
+ is_6ghz = true;
+
+ if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
+ is_raw_mode = true;
- SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
+ if (!ar->ab->hw_params->supports_monitor)
+ is_monitor_disable = true;
+
+ if (i == 0)
+ mac_addr = ar->mac_addr;
+ else
+ mac_addr = ab->mac_addr;
+
+ mbssid_max_interfaces += TARGET_NUM_VDEVS;
+ }
+
+ wiphy->available_antennas_rx = antennas_rx;
+ wiphy->available_antennas_tx = antennas_tx;
+
+ SET_IEEE80211_PERM_ADDR(hw, mac_addr);
SET_IEEE80211_DEV(hw, ab->dev);
ret = ath12k_mac_setup_iface_combinations(ah);
if (ret) {
ath12k_err(ab, "failed to setup interface combinations: %d\n", ret);
- goto err_cleanup_unregister;
+ goto err_complete_cleanup_unregister;
}
wiphy->interface_modes = ath12k_mac_get_ifmodes(ah);
- if (wiphy->bands[NL80211_BAND_2GHZ] &&
+ if (ah->num_radio == 1 &&
+ wiphy->bands[NL80211_BAND_2GHZ] &&
wiphy->bands[NL80211_BAND_5GHZ] &&
wiphy->bands[NL80211_BAND_6GHZ])
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
@@ -8019,7 +10905,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(hw, REPORTS_LOW_ACK);
- if (ht_cap & WMI_HT_CAP_ENABLED) {
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || is_6ghz) {
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
@@ -8034,7 +10920,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability is different for each band.
*/
- if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
+ (is_6ghz && ab->hw_params->supports_dynamic_smps_6ghz))
wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -8050,6 +10937,21 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
+ /* MLO is not yet supported so disable Wireless Extensions for now
+ * to make sure ath12k users don't use it. This flag can be removed
+ * once WIPHY_FLAG_SUPPORTS_MLO is enabled.
+ */
+ wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+
+ /* Copy over MLO related capabilities received from
+ * WMI_SERVICE_READY_EXT2_EVENT if single_chip_mlo_supp is set.
+ */
+ if (ab->ag->mlo_capable) {
+ ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap;
+ ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap;
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ }
+
hw->queues = ATH12K_HW_MAX_QUEUES;
wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
@@ -8057,6 +10959,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
hw->vif_data_size = sizeof(struct ath12k_vif);
hw->sta_data_size = sizeof(struct ath12k_sta);
+ hw->extra_tx_headroom = ab->hw_params->iova_mask;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
@@ -8067,7 +10970,10 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa);
- if (ar->supports_6ghz) {
+ wiphy->mbssid_max_interfaces = mbssid_max_interfaces;
+ wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD;
+
+ if (is_6ghz) {
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_FILS_DISCOVERY);
wiphy_ext_feature_set(wiphy,
@@ -8078,19 +10984,37 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ath12k_reg_init(hw);
- if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ if (!is_raw_mode) {
hw->netdev_features = NETIF_F_HW_CSUM;
ieee80211_hw_set(hw, SW_CRYPTO_CONTROL);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
}
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+ }
+
+ ret = ath12k_wow_init(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to init wow: %d\n", ret);
+ goto err_cleanup_if_combs;
+ }
+
ret = ieee80211_register_hw(hw);
if (ret) {
ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
- goto err_free_if_combs;
+ goto err_cleanup_if_combs;
}
- if (!ab->hw_params->supports_monitor)
+ if (is_monitor_disable)
/* There's a race between calling ieee80211_register_hw()
* and here where the monitor mode is enabled for a little
* while. But that time is so short and in practice it makes
@@ -8098,26 +11022,37 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
*/
wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
- /* Apply the regd received during initialization */
- ret = ath12k_regd_update(ar, true);
- if (ret) {
- ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
- goto err_unregister_hw;
+ for_each_ar(ah, ar, i) {
+ /* Apply the regd received during initialization */
+ ret = ath12k_regd_update(ar, true);
+ if (ret) {
+ ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
+ goto err_unregister_hw;
+ }
+
+ ath12k_debugfs_register(ar);
}
return 0;
err_unregister_hw:
+ for_each_ar(ah, ar, i)
+ ath12k_debugfs_unregister(ar);
+
ieee80211_unregister_hw(hw);
-err_free_if_combs:
- kfree(wiphy->iface_combinations[0].limits);
- kfree(wiphy->iface_combinations);
+err_cleanup_if_combs:
+ ath12k_mac_cleanup_iface_combinations(ah);
+
+err_complete_cleanup_unregister:
+ i = ah->num_radio;
err_cleanup_unregister:
- ath12k_mac_cleanup_unregister(ar);
+ for (j = 0; j < i; j++) {
+ ar = ath12k_ah_to_ar(ah, j);
+ ath12k_mac_cleanup_unregister(ar);
+ }
-out:
SET_IEEE80211_DEV(hw, NULL);
return ret;
@@ -8141,11 +11076,12 @@ static void ath12k_mac_setup(struct ath12k *ar)
ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+ ar->scan.arvif = NULL;
spin_lock_init(&ar->data_lock);
INIT_LIST_HEAD(&ar->arvifs);
INIT_LIST_HEAD(&ar->ppdu_stats_info);
- mutex_init(&ar->conf_mutex);
+
init_completion(&ar->vdev_setup_done);
init_completion(&ar->vdev_delete_done);
init_completion(&ar->peer_assoc_done);
@@ -8155,41 +11091,179 @@ static void ath12k_mac_setup(struct ath12k *ar)
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
+ init_completion(&ar->mlo_setup_done);
INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+ wiphy_work_init(&ar->scan.vdev_clean_wk, ath12k_scan_vdev_clean_work);
INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
- INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+ wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
}
-int ath12k_mac_register(struct ath12k_base *ab)
+static int __ath12k_mac_mlo_setup(struct ath12k *ar)
{
- struct ath12k_hw *ah;
- int i;
+ u8 num_link = 0, partner_link_id[ATH12K_GROUP_MAX_RADIO] = {};
+ struct ath12k_base *partner_ab, *ab = ar->ab;
+ struct ath12k_hw_group *ag = ab->ag;
+ struct wmi_mlo_setup_arg mlo = {};
+ struct ath12k_pdev *pdev;
+ unsigned long time_left;
+ int i, j, ret;
+
+ lockdep_assert_held(&ag->mutex);
+
+ reinit_completion(&ar->mlo_setup_done);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+
+ for (j = 0; j < partner_ab->num_radios; j++) {
+ pdev = &partner_ab->pdevs[j];
+
+ /* Avoid the self link */
+ if (ar == pdev->ar)
+ continue;
+
+ partner_link_id[num_link] = pdev->hw_link_id;
+ num_link++;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "device %d pdev %d hw_link_id %d num_link %d\n",
+ i, j, pdev->hw_link_id, num_link);
+ }
+ }
+
+ mlo.group_id = cpu_to_le32(ag->id);
+ mlo.partner_link_id = partner_link_id;
+ mlo.num_partner_links = num_link;
+ ar->mlo_setup_status = 0;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "group id %d num_link %d\n", ag->id, num_link);
+
+ ret = ath12k_wmi_mlo_setup(ar, &mlo);
+ if (ret) {
+ ath12k_err(ab, "failed to send setup MLO WMI command for pdev %d: %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->mlo_setup_done,
+ WMI_MLO_CMD_TIMEOUT_HZ);
+
+ if (!time_left || ar->mlo_setup_status)
+ return ar->mlo_setup_status ? : -ETIMEDOUT;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo setup done for pdev %d\n", ar->pdev_idx);
+
+ return 0;
+}
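The error handling folds the two failure sources into one return value: on timeout time_left is zero, and a nonzero mlo_setup_status reported by firmware takes precedence over -ETIMEDOUT (the GNU ?: shorthand above). A standalone sketch of the fold:

#include <errno.h>

/* Combine a completion-wait result and a firmware status into one code. */
static int fold_mlo_result(unsigned long time_left, int fw_status)
{
	if (!time_left || fw_status)
		return fw_status ? fw_status : -ETIMEDOUT;

	return 0;
}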
+
+static int __ath12k_mac_mlo_teardown(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
int ret;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
return 0;
- /* Initialize channel counters frequency value in hertz */
- ab->cc_freq_hz = 320000;
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ ret = ath12k_wmi_mlo_teardown(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to send MLO teardown WMI command for pdev %d: %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo teardown for pdev %d\n", ar->pdev_idx);
+
+ return 0;
+}
+
+int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag)
+{
+ struct ath12k_hw *ah;
+ struct ath12k *ar;
+ int ret;
+ int i, j;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for_each_ar(ah, ar, j) {
+ ar = &ah->radio[j];
+ ret = __ath12k_mac_mlo_setup(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to setup MLO: %d\n", ret);
+ goto err_setup;
+ }
+ }
+ }
+
+ return 0;
+
+err_setup:
+ for (i = i - 1; i >= 0; i--) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for (j = j - 1; j >= 0; j--) {
+ ar = &ah->radio[j];
+ if (!ar)
+ continue;
+
+ __ath12k_mac_mlo_teardown(ar);
+ }
+ }
+
+ return ret;
+}
+
+void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag)
+{
+ struct ath12k_hw *ah;
+ struct ath12k *ar;
+ int ret, i, j;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for_each_ar(ah, ar, j) {
+ ar = &ah->radio[j];
+ ret = __ath12k_mac_mlo_teardown(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to teardown MLO: %d\n", ret);
+ break;
+ }
+ }
+ }
+}
+
+int ath12k_mac_register(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab = ag->ab[0];
+ struct ath12k_hw *ah;
+ int i;
+ int ret;
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
ret = ath12k_mac_hw_register(ah);
if (ret)
goto err;
}
+ set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
return 0;
err:
for (i = i - 1; i >= 0; i--) {
- ah = ab->ah[i];
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah)
continue;
@@ -8199,13 +11273,16 @@ err:
return ret;
}
-void ath12k_mac_unregister(struct ath12k_base *ab)
+void ath12k_mac_unregister(struct ath12k_hw_group *ag)
{
+ struct ath12k_base *ab = ag->ab[0];
struct ath12k_hw *ah;
int i;
- for (i = ab->num_hw - 1; i >= 0; i--) {
- ah = ab->ah[i];
+ clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+ for (i = ag->num_hw - 1; i >= 0; i--) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah)
continue;
@@ -8218,12 +11295,13 @@ static void ath12k_mac_hw_destroy(struct ath12k_hw *ah)
ieee80211_free_hw(ah->hw);
}
-static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
+static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_hw_group *ag,
struct ath12k_pdev_map *pdev_map,
u8 num_pdev_map)
{
struct ieee80211_hw *hw;
struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_pdev *pdev;
struct ath12k_hw *ah;
int i;
@@ -8238,67 +11316,132 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ah->hw = hw;
ah->num_radio = num_pdev_map;
+ mutex_init(&ah->hw_mutex);
+ INIT_LIST_HEAD(&ah->ml_peers);
+
for (i = 0; i < num_pdev_map; i++) {
ab = pdev_map[i].ab;
pdev_idx = pdev_map[i].pdev_idx;
pdev = &ab->pdevs[pdev_idx];
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, i);
ar->ah = ah;
ar->ab = ab;
- ar->hw_link_id = i;
+ ar->hw_link_id = pdev->hw_link_id;
ar->pdev = pdev;
ar->pdev_idx = pdev_idx;
pdev->ar = ar;
+ ag->hw_links[ar->hw_link_id].device_id = ab->device_id;
+ ag->hw_links[ar->hw_link_id].pdev_idx = pdev_idx;
+
ath12k_mac_setup(ar);
+ ath12k_dp_pdev_pre_alloc(ar);
}
return ah;
}
-void ath12k_mac_destroy(struct ath12k_base *ab)
+void ath12k_mac_destroy(struct ath12k_hw_group *ag)
{
struct ath12k_pdev *pdev;
- int i;
+ struct ath12k_base *ab = ag->ab[0];
+ int i, j;
+ struct ath12k_hw *ah;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- if (!pdev->ar)
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
continue;
- pdev->ar = NULL;
+ for (j = 0; j < ab->num_radios; j++) {
+ pdev = &ab->pdevs[j];
+ if (!pdev->ar)
+ continue;
+ pdev->ar = NULL;
+ }
}
- for (i = 0; i < ab->num_hw; i++) {
- if (!ab->ah[i])
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
+ if (!ah)
continue;
- ath12k_mac_hw_destroy(ab->ah[i]);
- ab->ah[i] = NULL;
+ ath12k_mac_hw_destroy(ah);
+ ath12k_ag_set_ah(ag, i, NULL);
}
}
-int ath12k_mac_allocate(struct ath12k_base *ab)
+static void ath12k_mac_set_device_defaults(struct ath12k_base *ab)
{
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = 320000;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+}
+
+int ath12k_mac_allocate(struct ath12k_hw_group *ag)
+{
+ struct ath12k_pdev_map pdev_map[ATH12K_GROUP_MAX_RADIO];
+ int mac_id, device_id, total_radio, num_hw;
+ struct ath12k_base *ab;
struct ath12k_hw *ah;
- struct ath12k_pdev_map pdev_map[MAX_RADIOS];
int ret, i, j;
u8 radio_per_hw;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
+ total_radio = 0;
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
- ab->num_hw = ab->num_radios;
- radio_per_hw = 1;
+ ath12k_mac_set_device_defaults(ab);
+ total_radio += ab->num_radios;
+ }
+
+ if (!total_radio)
+ return -EINVAL;
- for (i = 0; i < ab->num_hw; i++) {
+ if (WARN_ON(total_radio > ATH12K_GROUP_MAX_RADIO))
+ return -ENOSPC;
+
+ /* All pdevs are combined and registered as a single wiphy when the
+ * hardware group participates in multi-link operation; otherwise each
+ * pdev is registered as a separate wiphy.
+ */
+ if (ag->mlo_capable)
+ radio_per_hw = total_radio;
+ else
+ radio_per_hw = 1;
+
+ num_hw = total_radio / radio_per_hw;
+
+ ag->num_hw = 0;
+ device_id = 0;
+ mac_id = 0;
+ for (i = 0; i < num_hw; i++) {
for (j = 0; j < radio_per_hw; j++) {
+ if (device_id >= ag->num_devices || !ag->ab[device_id]) {
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ ab = ag->ab[device_id];
pdev_map[j].ab = ab;
- pdev_map[j].pdev_idx = (i * radio_per_hw) + j;
+ pdev_map[j].pdev_idx = mac_id;
+ mac_id++;
+
+ /* If mac_id falls beyond the current device's MACs then
+ * move on to the next device
+ */
+ if (mac_id >= ab->num_radios) {
+ mac_id = 0;
+ device_id++;
+ }
}
- ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw);
+ ab = pdev_map->ab;
+
+ ah = ath12k_mac_hw_allocate(ag, pdev_map, radio_per_hw);
if (!ah) {
ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
i);
@@ -8306,21 +11449,54 @@ int ath12k_mac_allocate(struct ath12k_base *ab)
goto err;
}
- ab->ah[i] = ah;
- }
+ ah->dev = ab->dev;
- ath12k_dp_pdev_pre_alloc(ab);
+ ag->ah[i] = ah;
+ ag->num_hw++;
+ }
return 0;
err:
for (i = i - 1; i >= 0; i--) {
- if (!ab->ah[i])
+ ah = ath12k_ag_to_ah(ag, i);
+ if (!ah)
continue;
- ath12k_mac_hw_destroy(ab->ah[i]);
- ab->ah[i] = NULL;
+ ath12k_mac_hw_destroy(ah);
+ ath12k_ag_set_ah(ag, i, NULL);
}
return ret;
}
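The allocation loop walks a flat mac_id across each device's radios and bumps device_id once a device is exhausted; with mlo_capable set, radio_per_hw equals total_radio, so num_hw is 1 and every pdev lands in the same hw. A runnable sketch of the walk, with illustrative radio counts:

#include <stdio.h>

int main(void)
{
	int num_radios[] = { 2, 1 };		/* radios per device (illustrative) */
	int total_radio = 3;
	int radio_per_hw = total_radio;		/* mlo_capable: one hw for all */
	int num_hw = total_radio / radio_per_hw;
	int device_id = 0, mac_id = 0;

	for (int i = 0; i < num_hw; i++)
		for (int j = 0; j < radio_per_hw; j++) {
			printf("hw %d slot %d -> device %d pdev %d\n",
			       i, j, device_id, mac_id);
			if (++mac_id >= num_radios[device_id]) {
				mac_id = 0;	/* next device's first MAC */
				device_id++;
			}
		}

	return 0;
}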
+
+int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct wmi_sta_keepalive_arg arg = {};
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = method;
+ arg.interval = interval;
+
+ ret = ath12k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
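A hypothetical call site for the helper above, e.g. from a STA bring-up path once the vdev is started; the method and interval constants are assumed from the WMI keepalive definitions:

/* Illustrative only: enable NULL-frame keepalive on a station vdev. */
ret = ath12k_mac_vif_set_keepalive(arvif,
				   WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				   WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
if (ret)
	ath12k_warn(ar->ab, "failed to enable keepalive: %d\n", ret);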
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 3f5e1be0dff9..3594729b6397 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -9,10 +9,12 @@
#include <net/mac80211.h>
#include <net/cfg80211.h>
+#include "wmi.h"
struct ath12k;
struct ath12k_base;
struct ath12k_hw;
+struct ath12k_hw_group;
struct ath12k_pdev_map;
struct ath12k_generic_iter {
@@ -40,6 +42,15 @@ struct ath12k_generic_iter {
#define ATH12K_TX_POWER_MAX_VAL 70
#define ATH12K_TX_POWER_MIN_VAL 0
+#define ATH12K_DEFAULT_LINK_ID 0
+#define ATH12K_INVALID_LINK_ID 255
+
+/* Default scan link placed after the IEEE 802.11 defined maximum
+ * link id limit, reserved for internal driver use.
+ */
+#define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
+#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1)
+
enum ath12k_supported_bw {
ATH12K_BW_20 = 0,
ATH12K_BW_40 = 1,
@@ -48,12 +59,17 @@ enum ath12k_supported_bw {
ATH12K_BW_320 = 4,
};
+struct ath12k_mac_get_any_chanctx_conf_arg {
+ struct ath12k *ar;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+};
+
extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
-void ath12k_mac_destroy(struct ath12k_base *ab);
-void ath12k_mac_unregister(struct ath12k_base *ab);
-int ath12k_mac_register(struct ath12k_base *ab);
-int ath12k_mac_allocate(struct ath12k_base *ab);
+void ath12k_mac_destroy(struct ath12k_hw_group *ag);
+void ath12k_mac_unregister(struct ath12k_hw_group *ag);
+int ath12k_mac_register(struct ath12k_hw_group *ag);
+int ath12k_mac_allocate(struct ath12k_hw_group *ag);
int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
u16 *rate);
u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
@@ -64,9 +80,9 @@ u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
void __ath12k_mac_scan_finish(struct ath12k *ar);
void ath12k_mac_scan_finish(struct ath12k *ar);
-struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id);
-struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
- u32 vdev_id);
+struct ath12k_link_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id);
+struct ath12k_link_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
+ u32 vdev_id);
struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id);
struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id);
@@ -78,4 +94,19 @@ enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw b
enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher);
int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable);
int ath12k_mac_rfkill_config(struct ath12k *ar);
+int ath12k_mac_wait_tx_complete(struct ath12k *ar);
+void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb);
+void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id);
+int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval);
+u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar);
+int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag);
+int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag);
+void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag);
+int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif);
+void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data);
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index adb8c3ec1950..2f6d14382ed7 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -16,37 +16,10 @@
#define MHI_TIMEOUT_DEFAULT_MS 90000
#define OTP_INVALID_BOARD_ID 0xFFFF
#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000
+#define MHI_CB_INVALID 0xff
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
.num = 20,
.name = "IPCR",
.num_elements = 32,
@@ -112,34 +85,6 @@ const struct mhi_controller_config ath12k_mhi_config_qcn9274 = {
static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = {
{
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
.num = 20,
.name = "IPCR",
.num_elements = 64,
@@ -196,7 +141,7 @@ const struct mhi_controller_config ath12k_mhi_config_wcn7850 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
- .buf_len = 0,
+ .buf_len = 8192,
.num_channels = ARRAY_SIZE(ath12k_mhi_channels_wcn7850),
.ch_cfg = ath12k_mhi_channels_wcn7850,
.num_events = ARRAY_SIZE(ath12k_mhi_events_wcn7850),
@@ -324,6 +269,7 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n",
ath12k_mhi_op_callback_to_str(cb));
@@ -333,12 +279,20 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
break;
case MHI_CB_EE_RDDM:
+ if (ab_pci->mhi_pre_cb == MHI_CB_EE_RDDM) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "do not queue again for consecutive RDDM event\n");
+ break;
+ }
+
if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)))
queue_work(ab->workqueue_aux, &ab->reset_work);
break;
default:
break;
}
+
+ ab_pci->mhi_pre_cb = cb;
}
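Remembering the previous MHI callback lets the driver drop a second RDDM notification that arrives while the first reset is still in flight; the latest callback is always recorded, so any non-RDDM event re-arms the path. A standalone sketch of the suppression, with hypothetical enum values:

enum cb { CB_SYS_ERROR = 0, CB_RDDM = 1, CB_INVALID = 0xff };

static enum cb prev_cb = CB_INVALID;

/* Returns 0 for a back-to-back RDDM event, 1 if it should be handled. */
static int should_handle(enum cb cb)
{
	int handle = !(cb == CB_RDDM && prev_cb == CB_RDDM);

	prev_cb = cb;	/* always remember the latest event */
	return handle;
}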
static int ath12k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
@@ -369,6 +323,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
if (!mhi_ctrl)
return -ENOMEM;
+ ab_pci->mhi_pre_cb = MHI_CB_INVALID;
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
mhi_ctrl->regs = ab->mem;
@@ -385,7 +340,6 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
"failed to read board id\n");
} else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) {
dualmac = true;
- ab->slo_capable = false;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"dualmac fw selected for board id: %x\n", board_id);
}
@@ -470,6 +424,8 @@ static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state)
return "POWER_ON";
case ATH12K_MHI_POWER_OFF:
return "POWER_OFF";
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
+ return "POWER_OFF_KEEP_DEV";
case ATH12K_MHI_FORCE_POWER_OFF:
return "FORCE_POWER_OFF";
case ATH12K_MHI_SUSPEND:
@@ -501,6 +457,7 @@ static void ath12k_mhi_set_state_bit(struct ath12k_pci *ab_pci,
set_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
break;
case ATH12K_MHI_POWER_OFF:
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
case ATH12K_MHI_FORCE_POWER_OFF:
clear_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
clear_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
@@ -544,6 +501,7 @@ static int ath12k_mhi_check_state_bit(struct ath12k_pci *ab_pci,
return 0;
break;
case ATH12K_MHI_POWER_OFF:
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
case ATH12K_MHI_SUSPEND:
if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) &&
!test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state))
@@ -594,12 +552,27 @@ static int ath12k_mhi_set_state(struct ath12k_pci *ab_pci,
ret = 0;
break;
case ATH12K_MHI_POWER_ON:
- ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+ /* In the resume case, QRTR's resume_early() is called
+ * right after ath12k's resume_early(). Since QRTR requires
+ * the MHI mission mode state when preparing IPCR channels
+ * (see ee_mask of that channel), we need to use the 'sync'
+ * version here to make sure MHI is in that state when we
+ * return. Otherwise QRTR might resume before that state is
+ * reached, and as a result it fails.
+ *
+ * The 'sync' version works for the non-resume (normal power
+ * on) case as well.
+ */
+ ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
break;
case ATH12K_MHI_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, true);
ret = 0;
break;
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
+ mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
+ ret = 0;
+ break;
case ATH12K_MHI_FORCE_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, false);
ret = 0;
@@ -653,9 +626,17 @@ out:
return ret;
}
-void ath12k_mhi_stop(struct ath12k_pci *ab_pci)
+void ath12k_mhi_stop(struct ath12k_pci *ab_pci, bool is_suspend)
{
- ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
+ /* During suspend we need to use the mhi_power_down_keep_dev()
+ * workaround; otherwise ath12k_core_resume() will time out
+ * during resume.
+ */
+ if (is_suspend)
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF_KEEP_DEV);
+ else
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
+
ath12k_mhi_set_state(ab_pci, ATH12K_MHI_DEINIT);
}
@@ -668,3 +649,8 @@ void ath12k_mhi_resume(struct ath12k_pci *ab_pci)
{
ath12k_mhi_set_state(ab_pci, ATH12K_MHI_RESUME);
}
+
+void ath12k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic)
+{
+ mhi_download_rddm_image(mhi_ctrl, in_panic);
+}
diff --git a/drivers/net/wireless/ath/ath12k/mhi.h b/drivers/net/wireless/ath/ath12k/mhi.h
index ebc23640ce7a..7358b8477536 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.h
+++ b/drivers/net/wireless/ath/ath12k/mhi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_MHI_H
#define _ATH12K_MHI_H
@@ -22,6 +22,7 @@ enum ath12k_mhi_state {
ATH12K_MHI_DEINIT,
ATH12K_MHI_POWER_ON,
ATH12K_MHI_POWER_OFF,
+ ATH12K_MHI_POWER_OFF_KEEP_DEV,
ATH12K_MHI_FORCE_POWER_OFF,
ATH12K_MHI_SUSPEND,
ATH12K_MHI_RESUME,
@@ -34,7 +35,7 @@ extern const struct mhi_controller_config ath12k_mhi_config_qcn9274;
extern const struct mhi_controller_config ath12k_mhi_config_wcn7850;
int ath12k_mhi_start(struct ath12k_pci *ar_pci);
-void ath12k_mhi_stop(struct ath12k_pci *ar_pci);
+void ath12k_mhi_stop(struct ath12k_pci *ar_pci, bool is_suspend);
int ath12k_mhi_register(struct ath12k_pci *ar_pci);
void ath12k_mhi_unregister(struct ath12k_pci *ar_pci);
void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab);
@@ -42,5 +43,5 @@ void ath12k_mhi_clear_vector(struct ath12k_base *ab);
void ath12k_mhi_suspend(struct ath12k_pci *ar_pci);
void ath12k_mhi_resume(struct ath12k_pci *ar_pci);
-
+void ath12k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/p2p.c b/drivers/net/wireless/ath/ath12k/p2p.c
index d334df720032..84cccf7d91e7 100644
--- a/drivers/net/wireless/ath/ath12k/p2p.c
+++ b/drivers/net/wireless/ath/ath12k/p2p.c
@@ -69,20 +69,20 @@ static size_t ath12k_p2p_noa_ie_len_compute(const struct ath12k_wmi_p2p_noa_info
return len;
}
-static void ath12k_p2p_noa_ie_assign(struct ath12k_vif *arvif, void *ie,
+static void ath12k_p2p_noa_ie_assign(struct ath12k_link_vif *arvif, void *ie,
size_t len)
{
struct ath12k *ar = arvif->ar;
lockdep_assert_held(&ar->data_lock);
- kfree(arvif->u.ap.noa_data);
+ kfree(arvif->ahvif->u.ap.noa_data);
- arvif->u.ap.noa_data = ie;
- arvif->u.ap.noa_len = len;
+ arvif->ahvif->u.ap.noa_data = ie;
+ arvif->ahvif->u.ap.noa_len = len;
}
-static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+static void __ath12k_p2p_noa_update(struct ath12k_link_vif *arvif,
const struct ath12k_wmi_p2p_noa_info *noa)
{
struct ath12k *ar = arvif->ar;
@@ -105,7 +105,7 @@ static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif,
ath12k_p2p_noa_ie_assign(arvif, ie, len);
}
-void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+void ath12k_p2p_noa_update(struct ath12k_link_vif *arvif,
const struct ath12k_wmi_p2p_noa_info *noa)
{
struct ath12k *ar = arvif->ar;
@@ -118,10 +118,13 @@ void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_p2p_noa_arg *arg = data;
+ struct ath12k_link_vif *arvif;
- if (arvif->vdev_id != arg->vdev_id)
+ WARN_ON(!rcu_read_lock_any_held());
+ arvif = &ahvif->deflink;
+ if (arvif->ar != arg->ar || arvif->vdev_id != arg->vdev_id)
return;
ath12k_p2p_noa_update(arvif, arg->noa);
@@ -132,6 +135,7 @@ void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
{
struct ath12k_p2p_noa_arg arg = {
.vdev_id = vdev_id,
+ .ar = ar,
.noa = noa,
};
diff --git a/drivers/net/wireless/ath/ath12k/p2p.h b/drivers/net/wireless/ath/ath12k/p2p.h
index 5768139a7844..03ee877e6d6b 100644
--- a/drivers/net/wireless/ath/ath12k/p2p.h
+++ b/drivers/net/wireless/ath/ath12k/p2p.h
@@ -12,10 +12,11 @@ struct ath12k_wmi_p2p_noa_info;
struct ath12k_p2p_noa_arg {
u32 vdev_id;
+ struct ath12k *ar;
const struct ath12k_wmi_p2p_noa_info *noa;
};
-void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+void ath12k_p2p_noa_update(struct ath12k_link_vif *arvif,
const struct ath12k_wmi_p2p_noa_info *noa);
void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
const struct ath12k_wmi_p2p_noa_info *noa);
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 14954bc05144..06cff3849ab8 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -7,6 +7,8 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
#include "pci.h"
#include "core.h"
@@ -350,6 +352,7 @@ static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
+ free_netdev(irq_grp->napi_ndev);
}
}
@@ -424,9 +427,9 @@ static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
}
}
-static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
+static void ath12k_pci_ce_workqueue(struct work_struct *work)
{
- struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+ struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
@@ -448,7 +451,7 @@ static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
disable_irq_nosync(ab->irq_num[irq_idx]);
- tasklet_schedule(&ce_pipe->intr_tq);
+ queue_work(system_bh_wq, &ce_pipe->intr_wq);
return IRQ_HANDLED;
}
@@ -472,7 +475,8 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
int i;
- clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+ if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
@@ -560,8 +564,9 @@ static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
- int i, j, ret, num_vectors = 0;
+ int i, j, n, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0, base_idx;
+ struct ath12k_ext_irq_grp *irq_grp;
base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
@@ -572,13 +577,18 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
return ret;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
- init_dummy_netdev(&irq_grp->napi_ndev);
- netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev) {
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
ath12k_pci_ext_grp_napi_poll);
if (ab->hw_params->ring_mask->tx[i] ||
@@ -611,13 +621,23 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
if (ret) {
ath12k_err(ab, "failed request irq %d: %d\n",
vector, ret);
- return ret;
+ goto fail_request;
}
}
ath12k_pci_ext_grp_disable(irq_grp);
}
return 0;
+
+fail_request:
+ /* the i-th napi_ndev was allocated successfully, so free it too */
+ i += 1;
+fail_allocate:
+ for (n = 0; n < i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
+ return ret;
}
static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
@@ -657,7 +677,7 @@ static int ath12k_pci_config_irq(struct ath12k_base *ab)
irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
- tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);
+ INIT_WORK(&ce_pipe->intr_wq, ath12k_pci_ce_workqueue);
ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
ab_pci->irq_flags, irq_name[irq_idx],
@@ -872,7 +892,7 @@ static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
goto release_region;
}
- ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem);
return 0;
release_region:
@@ -936,14 +956,15 @@ static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
- if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ if (ab_pci->ab->hw_params->supports_aspm &&
+ test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC,
ab_pci->link_ctl &
PCI_EXP_LNKCTL_ASPMC);
}
-static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
+static void ath12k_pci_cancel_workqueue(struct ath12k_base *ab)
{
int i;
@@ -953,7 +974,7 @@ static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
- tasklet_kill(&ce_pipe->intr_tq);
+ cancel_work_sync(&ce_pipe->intr_wq);
}
}
@@ -961,7 +982,7 @@ static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
{
ath12k_pci_ce_irqs_disable(ab);
ath12k_pci_sync_ce_irqs(ab);
- ath12k_pci_kill_tasklets(ab);
+ ath12k_pci_cancel_workqueue(ab);
}
int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
@@ -1090,18 +1111,21 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
int i;
- set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
-
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
napi_enable(&irq_grp->napi);
ath12k_pci_ext_grp_enable(irq_grp);
}
+
+ set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
}
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
+ if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return;
+
__ath12k_pci_ext_irq_disable(ab);
ath12k_pci_sync_ext_irqs(ab);
}
@@ -1126,6 +1150,11 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab)
void ath12k_pci_stop(struct ath12k_base *ab)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
+ return;
+
ath12k_pci_ce_irq_disable_sync(ab);
ath12k_ce_cleanup_pipes(ab);
}
@@ -1240,6 +1269,186 @@ void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
ab_pci->pci_ops->release(ab);
}
+#ifdef CONFIG_ATH12K_COREDUMP
+static int ath12k_pci_coredump_calculate_size(struct ath12k_base *ab, u32 *dump_seg_sz)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+ struct image_info *rddm_img, *fw_img;
+ struct ath12k_tlv_dump_data *dump_tlv;
+ enum ath12k_fw_crash_dump_type mem_type;
+ u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
+ struct ath12k_dump_file_data *file_data;
+ int i;
+
+ rddm_img = mhi_ctrl->rddm_image;
+ if (!rddm_img) {
+ ath12k_err(ab, "No RDDM dump found\n");
+ return 0;
+ }
+
+ fw_img = mhi_ctrl->fbc_image;
+
+ for (i = 0; i < fw_img->entries ; i++) {
+ if (!fw_img->mhi_buf[i].buf)
+ continue;
+
+ paging_tlv_sz += fw_img->mhi_buf[i].len;
+ }
+ dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;
+
+ for (i = 0; i < rddm_img->entries; i++) {
+ if (!rddm_img->mhi_buf[i].buf)
+ continue;
+
+ rddm_tlv_sz += rddm_img->mhi_buf[i].len;
+ }
+ dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ mem_type = ath12k_coredump_get_dump_type(ab->qmi.target_mem[i].type);
+
+ if (mem_type == FW_CRASH_DUMP_NONE)
+ continue;
+
+ if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "target mem region type %d not supported",
+ ab->qmi.target_mem[i].type);
+ continue;
+ }
+
+ if (!ab->qmi.target_mem[i].paddr)
+ continue;
+
+ dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
+ }
+
+ for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
+ if (!dump_seg_sz[i])
+ continue;
+
+ len += sizeof(*dump_tlv) + dump_seg_sz[i];
+ }
+
+ if (len)
+ len += sizeof(*file_data);
+
+ return len;
+}
+
+static void ath12k_pci_coredump_download(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+ struct image_info *rddm_img, *fw_img;
+ struct timespec64 timestamp;
+ int i, len, mem_idx;
+ enum ath12k_fw_crash_dump_type mem_type;
+ struct ath12k_dump_file_data *file_data;
+ struct ath12k_tlv_dump_data *dump_tlv;
+ size_t hdr_len = sizeof(*file_data);
+ void *buf;
+ u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 };
+
+ ath12k_mhi_coredump(mhi_ctrl, false);
+
+ len = ath12k_pci_coredump_calculate_size(ab, dump_seg_sz);
+ if (!len) {
+ ath12k_warn(ab, "No crash dump data found for devcoredump");
+ return;
+ }
+
+ rddm_img = mhi_ctrl->rddm_image;
+ fw_img = mhi_ctrl->fbc_image;
+
+ /* dev_coredumpv() requires vmalloc data */
+ buf = vzalloc(len);
+ if (!buf)
+ return;
+
+ ab->dump_data = buf;
+ ab->ath12k_coredump_len = len;
+ file_data = ab->dump_data;
+ strscpy(file_data->df_magic, "ATH12K-FW-DUMP", sizeof(file_data->df_magic));
+ file_data->len = cpu_to_le32(len);
+ file_data->version = cpu_to_le32(ATH12K_FW_CRASH_DUMP_V2);
+ file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
+ file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
+ file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
+ guid_gen(&file_data->guid);
+ ktime_get_real_ts64(&timestamp);
+ file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
+ file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
+ buf += hdr_len;
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ /* append all segments together as they are all part of a single contiguous
+ * block of memory
+ */
+ for (i = 0; i < fw_img->entries ; i++) {
+ if (!fw_img->mhi_buf[i].buf)
+ continue;
+
+ memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
+ fw_img->mhi_buf[i].len);
+ buf += fw_img->mhi_buf[i].len;
+ }
+
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ /* append all segments together as they are all part of a single contiguous
+ * block of memory
+ */
+ for (i = 0; i < rddm_img->entries; i++) {
+ if (!rddm_img->mhi_buf[i].buf)
+ continue;
+
+ memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
+ rddm_img->mhi_buf[i].len);
+ buf += rddm_img->mhi_buf[i].len;
+ }
+
+ mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
+ if (!dump_seg_sz[mem_idx] || mem_idx == FW_CRASH_DUMP_NONE)
+ continue;
+
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(mem_idx);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ mem_type = ath12k_coredump_get_dump_type
+ (ab->qmi.target_mem[i].type);
+
+ if (mem_type != mem_idx)
+ continue;
+
+ if (!ab->qmi.target_mem[i].paddr) {
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "Skipping mem region type %d",
+ ab->qmi.target_mem[i].type);
+ continue;
+ }
+
+ memcpy_fromio(buf, ab->qmi.target_mem[i].v.ioaddr,
+ ab->qmi.target_mem[i].size);
+ buf += ab->qmi.target_mem[i].size;
+ }
+ }
+
+ queue_work(ab->workqueue, &ab->dump_work);
+}
+#endif
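The dump file is a header followed by one TLV per non-empty segment type, so the sizing pass returns header + Σ(tlv header + segment bytes), or 0 when there is nothing to dump; the download pass then fills the same layout in order. A standalone sketch of the sizing arithmetic, where struct tlv_hdr stands in for ath12k_tlv_dump_data:

#include <stddef.h>

struct tlv_hdr { unsigned int type; unsigned int len; };

static size_t dump_size(const size_t *seg_sz, int n_types, size_t file_hdr_sz)
{
	size_t len = 0;
	int i;

	for (i = 0; i < n_types; i++)
		if (seg_sz[i])
			len += sizeof(struct tlv_hdr) + seg_sz[i];

	return len ? len + file_hdr_sz : 0;	/* nothing to dump -> 0 */
}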
+
int ath12k_pci_power_up(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
@@ -1271,7 +1480,7 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
return 0;
}
-void ath12k_pci_power_down(struct ath12k_base *ab)
+void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
@@ -1280,11 +1489,18 @@ void ath12k_pci_power_down(struct ath12k_base *ab)
ath12k_pci_force_wake(ab_pci->ab);
ath12k_pci_msi_disable(ab_pci);
- ath12k_mhi_stop(ab_pci);
+ ath12k_mhi_stop(ab_pci, is_suspend);
clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath12k_pci_sw_reset(ab_pci->ab, false);
}
+static int ath12k_pci_panic_handler(struct ath12k_base *ab)
+{
+ ath12k_pci_sw_reset(ab, false);
+
+ return NOTIFY_OK;
+}
+
static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
.start = ath12k_pci_start,
.stop = ath12k_pci_stop,
@@ -1302,6 +1518,10 @@ static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
+ .panic_handler = ath12k_pci_panic_handler,
+#ifdef CONFIG_ATH12K_COREDUMP
+ .coredump_download = ath12k_pci_coredump_download,
+#endif
};
static
@@ -1503,15 +1723,18 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath12k_pci_power_down(ab);
+ ath12k_pci_power_down(ab, false);
ath12k_qmi_deinit_service(ab);
+ ath12k_core_hw_group_unassign(ab);
goto qmi_fail;
}
set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->reset_work);
+ cancel_work_sync(&ab->dump_work);
ath12k_core_deinit(ab);
+ ath12k_fw_unmap(ab);
qmi_fail:
ath12k_mhi_unregister(ab_pci);
@@ -1531,7 +1754,7 @@ static void ath12k_pci_shutdown(struct pci_dev *pdev)
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath12k_pci_power_down(ab);
+ ath12k_pci_power_down(ab, false);
}
static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
@@ -1558,9 +1781,36 @@ static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops,
- ath12k_pci_pm_suspend,
- ath12k_pci_pm_resume);
+static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev)
+{
+ struct ath12k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath12k_core_suspend_late(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to late suspend core: %d\n", ret);
+
+ return ret;
+}
+
+static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev)
+{
+ struct ath12k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath12k_core_resume_early(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to early resume core: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend,
+ ath12k_pci_pm_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late,
+ ath12k_pci_pm_resume_early)
+};
static struct pci_driver ath12k_pci_driver = {
.name = "ath12k_pci",
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index ca93693ba4e9..31584a7ad80e 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -104,6 +104,7 @@ struct ath12k_pci {
struct mhi_controller *mhi_ctrl;
const struct ath12k_msi_config *msi_config;
unsigned long mhi_state;
+ enum mhi_callback mhi_pre_cb;
u32 register_window;
/* protects register_window above */
@@ -143,5 +144,5 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab);
void ath12k_pci_stop(struct ath12k_base *ab);
int ath12k_pci_start(struct ath12k_base *ab);
int ath12k_pci_power_up(struct ath12k_base *ab);
-void ath12k_pci_power_down(struct ath12k_base *ab);
+void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend);
#endif /* ATH12K_PCI_H */
diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
index 19c0626fbff1..792cca8a3fb1 100644
--- a/drivers/net/wireless/ath/ath12k/peer.c
+++ b/drivers/net/wireless/ath/ath12k/peer.c
@@ -1,13 +1,29 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "peer.h"
#include "debug.h"
+static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
+{
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ list_for_each_entry(ml_peer, &ah->ml_peers, list) {
+ if (!ether_addr_equal(ml_peer->addr, addr))
+ continue;
+
+ return ml_peer;
+ }
+
+ return NULL;
+}
+
struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
const u8 *addr)
{
@@ -63,6 +79,20 @@ struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
return NULL;
}
+static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
+ int ml_peer_id)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (ml_peer_id == peer->ml_id)
+ return peer;
+
+ return NULL;
+}
+
struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
int peer_id)
{
@@ -70,6 +100,9 @@ struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
lockdep_assert_held(&ab->base_lock);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID)
+ return ath12k_peer_find_by_ml_id(ab, peer_id);
+
list_for_each_entry(peer, &ab->peers, list)
if (peer_id == peer->peer_id)
return peer;
@@ -186,7 +219,7 @@ void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
struct ath12k_peer *peer, *tmp;
struct ath12k_base *ab = ar->ab;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
@@ -231,22 +264,36 @@ int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
return 0;
}
-int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr)
{
+ struct ath12k_base *ab = ar->ab;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
reinit_completion(&ar->peer_delete_done);
ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
- ath12k_warn(ar->ab,
+ ath12k_warn(ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
}
+ return 0;
+}
+
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ ret = ath12k_peer_delete_send(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
if (ret)
return ret;
@@ -261,14 +308,19 @@ static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8
return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}
-int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
+int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_create_arg *arg)
{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ath12k_link_sta *arsta;
+ u8 link_id = arvif->link_id;
struct ath12k_peer *peer;
+ struct ath12k_sta *ahsta;
+ u16 ml_peer_id;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (ar->num_peers > (ar->max_num_peers - 1)) {
ath12k_warn(ar->ab,
@@ -326,11 +378,34 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
- if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->type == NL80211_IFTYPE_STATION) {
arvif->ast_hash = peer->ast_hash;
arvif->ast_idx = peer->hw_peer_id;
}
+ if (sta) {
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+
+ peer->link_id = arsta->link_id;
+
+ /* Fill ML info into created peer */
+ if (sta->mlo) {
+ ml_peer_id = ahsta->ml_peer_id;
+ peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID;
+ ether_addr_copy(peer->ml_addr, sta->addr);
+
+ /* the assoc link is considered primary for now */
+ peer->primary_link = arsta->is_assoc_link;
+ peer->mlo = true;
+ } else {
+ peer->ml_id = ATH12K_MLO_PEER_ID_INVALID;
+ peer->primary_link = true;
+ peer->mlo = false;
+ }
+ }
+
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
@@ -340,3 +415,150 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
return 0;
}
+
+static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
+{
+ u16 ml_peer_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) {
+ if (test_bit(ml_peer_id, ah->free_ml_peer_id_map))
+ continue;
+
+ set_bit(ml_peer_id, ah->free_ml_peer_id_map);
+ break;
+ }
+
+ if (ml_peer_id == ATH12K_MAX_MLO_PEERS)
+ ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+ return ml_peer_id;
+}
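ML peer ids come from a bitmap protected by the wiphy lock: the first clear bit is claimed, and pool exhaustion is signalled with the invalid-id sentinel rather than an error code. A standalone sketch of the allocator, with a hypothetical pool size:

#define MAX_IDS		16
#define ID_INVALID	0xffff

static unsigned long used_map;	/* bit i set => id i taken */

static unsigned int alloc_id(void)
{
	unsigned int id;

	for (id = 0; id < MAX_IDS; id++) {
		if (used_map & (1UL << id))
			continue;

		used_map |= 1UL << id;	/* claim the first free id */
		return id;
	}

	return ID_INVALID;	/* pool exhausted */
}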
+
+int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ ml_peer = ath12k_peer_ml_find(ah, sta->addr);
+ if (ml_peer) {
+ ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM",
+ ml_peer->id, sta->addr);
+ return -EEXIST;
+ }
+
+ ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
+ if (!ml_peer)
+ return -ENOMEM;
+
+ ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
+
+ if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+ ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
+ sta->addr);
+ kfree(ml_peer);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(ml_peer->addr, sta->addr);
+ ml_peer->id = ahsta->ml_peer_id;
+ list_add(&ml_peer->list, &ah->ml_peers);
+
+ return 0;
+}
+
+int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
+ ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+ ml_peer = ath12k_peer_ml_find(ah, sta->addr);
+ if (!ml_peer) {
+ ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr);
+ return -EINVAL;
+ }
+
+ list_del(&ml_peer->list);
+ kfree(ml_peer);
+
+ return 0;
+}
+
+int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ath12k_hw *ah = ahvif->ah;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long links;
+ struct ath12k *ar;
+ int ret, err_ret = 0;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ /* FW expects all link peers to be deleted at once, before the host
+ * waits for the peer unmap or delete responses
+ */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+ ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ arvif->vdev_id, arsta->addr, ret);
+ err_ret = ret;
+ continue;
+ }
+ }
+
+ /* Ensure all link peers are deleted and unmapped */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
+ if (ret) {
+ err_ret = ret;
+ continue;
+ }
+ ar->num_peers--;
+ }
+
+ return err_ret;
+}
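
Editor's note: a plausible caller for the two-phase teardown above; the call site lives in mac.c and is not part of this diff, so the function name here is illustrative only:

static int ath12k_mlo_sta_teardown_sketch(struct ath12k_vif *ahvif,
					  struct ath12k_sta *ahsta)
{
	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
	int ret;

	/* send all WMI peer delete commands, then wait for every
	 * unmap/delete response (both phases done inside the helper)
	 */
	ret = ath12k_peer_mlo_link_peers_delete(ahvif, ahsta);
	if (ret)
		return ret;

	/* finally release the ML peer id and list entry */
	return ath12k_peer_ml_delete(ahvif->ah, sta);
}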
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
index 7b3500b5c8c2..5870ee11a8c7 100644
--- a/drivers/net/wireless/ath/ath12k/peer.h
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PEER_H
@@ -19,6 +19,8 @@ struct ppdu_user_delayba {
u32 resp_rate_flags;
};
+#define ATH12K_PEER_ML_ID_VALID BIT(13)
+
struct ath12k_peer {
struct list_head list;
struct ieee80211_sta *sta;
@@ -44,9 +46,28 @@ struct ath12k_peer {
struct ppdu_user_delayba ppdu_stats_delayba;
bool delayba_flag;
bool is_authorized;
-
+ bool mlo;
/* protected by ab->data_lock */
bool dp_setup_done;
+
+ u16 ml_id;
+
+ /* any other ML info common for all partners can be added
+ * here and would be the same for all partner peers.
+ */
+ u8 ml_addr[ETH_ALEN];
+
+ /* To ensure only certain work related to dp is done once */
+ bool primary_link;
+
+ /* for reference to ath12k_link_sta */
+ u8 link_id;
+};
+
+struct ath12k_ml_peer {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u16 id;
};
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
@@ -59,12 +80,15 @@ struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, int peer_id);
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id);
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr);
-int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
+int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_create_arg *arg);
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
const u8 *addr);
bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id);
struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash);
+int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
#endif /* _PEER_H_ */
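
Editor's note: peer.c above relies on ath12k_peer_ml_find(), whose body is outside this diff. A plausible shape, assuming ah->ml_peers is the list populated by ath12k_peer_ml_create():

static struct ath12k_ml_peer *ath12k_peer_ml_find_sketch(struct ath12k_hw *ah,
							 const u8 *addr)
{
	struct ath12k_ml_peer *ml_peer;

	lockdep_assert_wiphy(ah->hw->wiphy);

	list_for_each_entry(ml_peer, &ah->ml_peers, list)
		if (ether_addr_equal(ml_peer->addr, addr))
			return ml_peer;

	return NULL;
}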
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 92845ffff44a..5c3563383fab 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -583,6 +583,24 @@ static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = {
board_id),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ single_chip_mlo_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ single_chip_mlo_support),
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -1998,49 +2016,146 @@ static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
},
};
-static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
- struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+static void ath12k_host_cap_hw_link_id_init(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab, *partner_ab;
+ int i, j, hw_id_base;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ hw_id_base = 0;
+ ab = ag->ab[i];
+
+ for (j = 0; j < ag->num_devices; j++) {
+ partner_ab = ag->ab[j];
+
+ if (partner_ab->wsi_info.index >= ab->wsi_info.index)
+ continue;
+
+ hw_id_base += partner_ab->qmi.num_radios;
+ }
+
+ ab->wsi_info.hw_link_id_base = hw_id_base;
+ }
+
+ ag->hw_link_id_init_done = true;
+}
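
Editor's note: the base computed above is the sum of the radio counts of all partner devices with a strictly lower WSI index, which keeps hw link ids unique across the group. A worked example with hypothetical numbers:

/* Hypothetical two-device group:
 *
 *   device A: wsi index 0, 2 radios -> hw_link_id_base = 0 (links 0, 1)
 *   device B: wsi index 1, 1 radio  -> hw_link_id_base = 2 (link 2)
 *
 * device B skips itself, device A contributes 2, so B starts at 2 and
 * no hw link id is reused across the group.
 */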
+
+static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
+ struct qmi_wlanfw_host_cap_req_msg_v01 *req)
{
struct wlfw_host_mlo_chip_info_s_v01 *info;
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_base *partner_ab;
u8 hw_link_id = 0;
- int i;
+ int i, j, ret;
+
+ if (!ag->mlo_capable) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "MLO is disabled hence skip QMI MLO cap");
+ return 0;
+ }
if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
+ ab->single_chip_mlo_supp = false;
+
ath12k_dbg(ab, ATH12K_DBG_QMI,
"skip QMI MLO cap due to invalid num_radio %d\n",
ab->qmi.num_radios);
- return;
+ return 0;
+ }
+
+ if (ab->device_id == ATH12K_INVALID_DEVICE_ID) {
+ ath12k_err(ab, "failed to send MLO cap due to invalid device id\n");
+ return -EINVAL;
}
req->mlo_capable_valid = 1;
req->mlo_capable = 1;
req->mlo_chip_id_valid = 1;
- req->mlo_chip_id = 0;
+ req->mlo_chip_id = ab->device_id;
req->mlo_group_id_valid = 1;
- req->mlo_group_id = 0;
+ req->mlo_group_id = ag->id;
req->max_mlo_peer_valid = 1;
/* Max peer number generally won't change for the same device
* but needs to be synced with host driver.
*/
req->max_mlo_peer = ab->hw_params->max_mlo_peer;
req->mlo_num_chips_valid = 1;
- req->mlo_num_chips = 1;
+ req->mlo_num_chips = ag->num_devices;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo capability advertisement device_id %d group_id %d num_devices %d",
+ req->mlo_chip_id, req->mlo_group_id, req->mlo_num_chips);
+
+ mutex_lock(&ag->mutex);
+
+ if (!ag->hw_link_id_init_done)
+ ath12k_host_cap_hw_link_id_init(ag);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ info = &req->mlo_chip_info[i];
+ partner_ab = ag->ab[i];
+
+ if (partner_ab->device_id == ATH12K_INVALID_DEVICE_ID) {
+ ath12k_err(ab, "failed to send MLO cap due to invalid partner device id\n");
+ ret = -EINVAL;
+ goto device_cleanup;
+ }
+
+ info->chip_id = partner_ab->device_id;
+ info->num_local_links = partner_ab->qmi.num_radios;
- info = &req->mlo_chip_info[0];
- info->chip_id = 0;
- info->num_local_links = ab->qmi.num_radios;
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo device id %d num_link %d\n",
+ info->chip_id, info->num_local_links);
- for (i = 0; i < info->num_local_links; i++) {
- info->hw_link_id[i] = hw_link_id;
- info->valid_mlo_link_id[i] = 1;
+ for (j = 0; j < info->num_local_links; j++) {
+ info->hw_link_id[j] = partner_ab->wsi_info.hw_link_id_base + j;
+ info->valid_mlo_link_id[j] = 1;
- hw_link_id++;
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo hw_link_id %d\n",
+ info->hw_link_id[j]);
+
+ hw_link_id++;
+ }
}
+ if (hw_link_id <= 0)
+ ag->mlo_capable = false;
+
req->mlo_chip_info_valid = 1;
+
+ mutex_unlock(&ag->mutex);
+
+ return 0;
+
+device_cleanup:
+ for (i = i - 1; i >= 0; i--) {
+ info = &req->mlo_chip_info[i];
+
+ memset(info, 0, sizeof(*info));
+ }
+
+ req->mlo_num_chips = 0;
+ req->mlo_num_chips_valid = 0;
+
+ req->max_mlo_peer = 0;
+ req->max_mlo_peer_valid = 0;
+ req->mlo_group_id = 0;
+ req->mlo_group_id_valid = 0;
+ req->mlo_chip_id = 0;
+ req->mlo_chip_id_valid = 0;
+ req->mlo_capable = 0;
+ req->mlo_capable_valid = 0;
+
+ ag->mlo_capable = false;
+
+ mutex_unlock(&ag->mutex);
+
+ return ret;
}
-static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
@@ -2085,7 +2200,9 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
}
- ath12k_host_cap_parse_mlo(ab, &req);
+ ret = ath12k_host_cap_parse_mlo(ab, &req);
+ if (ret < 0)
+ goto out;
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
@@ -2124,9 +2241,6 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
struct qmi_txn txn;
int ret;
- if (!ab->slo_capable)
- goto out;
-
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -2151,6 +2265,10 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
+ if (resp.single_chip_mlo_support_valid &&
+ resp.single_chip_mlo_support)
+ ab->single_chip_mlo_supp = true;
+
if (!resp.num_phy_valid) {
ret = -ENODATA;
goto out;
@@ -2158,9 +2276,11 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
ab->qmi.num_radios = resp.num_phy;
- ath12k_dbg(ab, ATH12K_DBG_QMI, "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n",
resp.num_phy_valid, resp.num_phy,
- resp.board_id_valid, resp.board_id);
+ resp.board_id_valid, resp.board_id,
+ resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support);
return;
@@ -2243,7 +2363,9 @@ resp_out:
return ret;
}
-static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
@@ -2318,29 +2440,125 @@ out:
return ret;
}
+static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
+ struct target_mem_chunk *chunk,
+ int idx)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct target_mem_chunk *mlo_chunk;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (!ag->mlo_mem.init_done || ag->num_started)
+ return;
+
+ if (idx >= ARRAY_SIZE(ag->mlo_mem.chunk)) {
+ ath12k_warn(ab, "invalid index for MLO memory chunk free: %d\n", idx);
+ return;
+ }
+
+ mlo_chunk = &ag->mlo_mem.chunk[idx];
+ if (mlo_chunk->v.addr) {
+ dma_free_coherent(ab->dev,
+ mlo_chunk->size,
+ mlo_chunk->v.addr,
+ mlo_chunk->paddr);
+ mlo_chunk->v.addr = NULL;
+ }
+
+ mlo_chunk->paddr = 0;
+ mlo_chunk->size = 0;
+ chunk->v.addr = NULL;
+ chunk->paddr = 0;
+ chunk->size = 0;
+}
+
static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
{
- int i;
+ struct ath12k_hw_group *ag = ab->ag;
+ int i, mlo_idx;
- for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
if (!ab->qmi.target_mem[i].v.addr)
continue;
- dma_free_coherent(ab->dev,
- ab->qmi.target_mem[i].size,
- ab->qmi.target_mem[i].v.addr,
- ab->qmi.target_mem[i].paddr);
- ab->qmi.target_mem[i].v.addr = NULL;
+
+ if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
+ ath12k_qmi_free_mlo_mem_chunk(ab,
+ &ab->qmi.target_mem[i],
+ mlo_idx++);
+ } else {
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].prev_size,
+ ab->qmi.target_mem[i].v.addr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].v.addr = NULL;
+ }
+ }
+
+ if (!ag->num_started && ag->mlo_mem.init_done) {
+ ag->mlo_mem.init_done = false;
+ ag->mlo_mem.mlo_mem_size = 0;
}
}
+static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab,
+ struct target_mem_chunk *chunk)
+{
+ /* Firmware reloads in recovery/resume.
+ * In such cases, no need to allocate memory for FW again.
+ */
+ if (chunk->v.addr) {
+ if (chunk->prev_type == chunk->type &&
+ chunk->prev_size == chunk->size)
+ goto this_chunk_done;
+
+ /* cannot reuse the existing chunk */
+ dma_free_coherent(ab->dev, chunk->prev_size,
+ chunk->v.addr, chunk->paddr);
+ chunk->v.addr = NULL;
+ }
+
+ chunk->v.addr = dma_alloc_coherent(ab->dev,
+ chunk->size,
+ &chunk->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!chunk->v.addr) {
+ if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
+ ab->qmi.target_mem_delayed = true;
+ ath12k_warn(ab,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath12k_qmi_free_target_mem_chunk(ab);
+ return -EAGAIN;
+ }
+ ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
+ chunk->type, chunk->size);
+ return -ENOMEM;
+ }
+ chunk->prev_type = chunk->type;
+ chunk->prev_size = chunk->size;
+this_chunk_done:
+ return 0;
+}
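
Editor's note: the reuse rule implemented above, summarized; a non-NULL v.addr means a buffer survived a previous firmware boot (recovery/resume):

/* chunk->v.addr  prev vs new (type, size)   action taken above
 * -------------  ------------------------   -----------------------------
 * NULL           -                          dma_alloc_coherent()
 * non-NULL       both equal                 reuse buffer as-is
 * non-NULL       either differs             dma_free_coherent() + realloc
 *
 * a failed allocation larger than ATH12K_QMI_MAX_CHUNK_SIZE sets
 * target_mem_delayed and returns -EAGAIN so firmware can retry with
 * smaller requests
 */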
+
static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
{
- int i;
- struct target_mem_chunk *chunk;
+ struct target_mem_chunk *chunk, *mlo_chunk;
+ struct ath12k_hw_group *ag = ab->ag;
+ int i, mlo_idx, ret;
+ int mlo_size = 0;
+
+ mutex_lock(&ag->mutex);
+
+ if (!ag->mlo_mem.init_done) {
+ memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk));
+ ag->mlo_mem.init_done = true;
+ }
ab->qmi.target_mem_delayed = false;
- for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
/* Allocate memory for the region and the functionality supported
@@ -2352,24 +2570,41 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
case M3_DUMP_REGION_TYPE:
case PAGEABLE_MEM_REGION_TYPE:
case CALDB_MEM_REGION_TYPE:
- chunk->v.addr = dma_alloc_coherent(ab->dev,
- chunk->size,
- &chunk->paddr,
- GFP_KERNEL | __GFP_NOWARN);
- if (!chunk->v.addr) {
- if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
- ab->qmi.target_mem_delayed = true;
- ath12k_warn(ab,
- "qmi dma allocation failed (%d B type %u), will try later with small size\n",
- chunk->size,
- chunk->type);
- ath12k_qmi_free_target_mem_chunk(ab);
- return 0;
+ ret = ath12k_qmi_alloc_chunk(ab, chunk);
+ if (ret)
+ goto err;
+ break;
+ case MLO_GLOBAL_MEM_REGION_TYPE:
+ mlo_size += chunk->size;
+ if (ag->mlo_mem.mlo_mem_size &&
+ mlo_size > ag->mlo_mem.mlo_mem_size) {
+ ath12k_err(ab, "QMI MLO memory allocation failure, requested size %d is more than allocated size %d",
+ mlo_size, ag->mlo_mem.mlo_mem_size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mlo_chunk = &ag->mlo_mem.chunk[mlo_idx];
+ if (mlo_chunk->paddr) {
+ if (chunk->size != mlo_chunk->size) {
+ ath12k_err(ab, "QMI MLO chunk memory allocation failure for index %d, requested size %d is more than allocated size %d",
+ mlo_idx, chunk->size, mlo_chunk->size);
+ ret = -EINVAL;
+ goto err;
}
- ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
- chunk->type, chunk->size);
- return -ENOMEM;
+ } else {
+ mlo_chunk->size = chunk->size;
+ mlo_chunk->type = chunk->type;
+ ret = ath12k_qmi_alloc_chunk(ab, mlo_chunk);
+ if (ret)
+ goto err;
+ memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
}
+
+ chunk->paddr = mlo_chunk->paddr;
+ chunk->v.addr = mlo_chunk->v.addr;
+ mlo_idx++;
+
break;
default:
ath12k_warn(ab, "memory type %u not supported\n",
@@ -2379,10 +2614,39 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
break;
}
}
+
+ if (!ag->mlo_mem.mlo_mem_size) {
+ ag->mlo_mem.mlo_mem_size = mlo_size;
+ } else if (ag->mlo_mem.mlo_mem_size != mlo_size) {
+ ath12k_err(ab, "QMI MLO memory size error, expected size is %d but requested size is %d",
+ ag->mlo_mem.mlo_mem_size, mlo_size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mutex_unlock(&ag->mutex);
+
return 0;
+
+err:
+ ath12k_qmi_free_target_mem_chunk(ab);
+
+ mutex_unlock(&ag->mutex);
+
+ /* The firmware will attempt to request memory in smaller chunks
+ * on the next try. However, the current caller should be notified
+ * that this instance of request parsing was successful.
+ * Therefore, return 0.
+ */
+ if (ret == -EAGAIN)
+ ret = 0;
+
+ return ret;
}
-static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req = {};
struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
@@ -2452,7 +2716,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
ab->qmi.dev_mem[i].size =
resp.dev_mem[i].size;
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "devmem [%d] start ox%llx size %llu\n", i,
+ "devmem [%d] start 0x%llx size %llu\n", i,
ab->qmi.dev_mem[i].start,
ab->qmi.dev_mem[i].size);
}
@@ -2487,7 +2751,7 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {};
struct qmi_txn txn;
const u8 *temp = data;
- int ret;
+ int ret = 0;
u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -2568,8 +2832,10 @@ out:
return ret;
}
-static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
- enum ath12k_qmi_bdf_type type)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
+ enum ath12k_qmi_bdf_type type)
{
struct device *dev = ab->dev;
char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE];
@@ -2666,6 +2932,19 @@ out:
return ret;
}
+static void ath12k_qmi_m3_free(struct ath12k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+ if (!m3_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+ m3_mem->size = 0;
+}
+
static int ath12k_qmi_m3_load(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
@@ -2675,10 +2954,6 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
size_t m3_len;
int ret;
- if (m3_mem->vaddr)
- /* m3 firmware buffer is already available in the DMA buffer */
- return 0;
-
if (ab->fw.m3_data && ab->fw.m3_len > 0) {
/* firmware-N.bin had a m3 firmware file so use that */
m3_data = ab->fw.m3_data;
@@ -2700,6 +2975,15 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
m3_len = fw->size;
}
+ /* In recovery/resume cases, the M3 buffer is not freed; try to reuse it */
+ if (m3_mem->vaddr) {
+ if (m3_mem->size >= m3_len)
+ goto skip_m3_alloc;
+
+ /* Old buffer is too small, free and reallocate */
+ ath12k_qmi_m3_free(ab);
+ }
+
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
m3_len, &m3_mem->paddr,
GFP_KERNEL);
@@ -2710,6 +2994,7 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
goto out;
}
+skip_m3_alloc:
memcpy(m3_mem->vaddr, m3_data, m3_len);
m3_mem->size = m3_len;
@@ -2721,20 +3006,9 @@ out:
return ret;
}
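
Editor's note: the M3 path above keeps the DMA buffer across recovery and reuses it whenever the old allocation is at least as large as the new image. The general pattern, as a standalone sketch (not driver code; note the driver additionally records the image length in m3_mem->size after the copy):

static void *dma_buf_reuse_or_realloc(struct device *dev, void **vaddr,
				      dma_addr_t *paddr, size_t *alloc_size,
				      size_t need)
{
	if (*vaddr && *alloc_size >= need)
		return *vaddr;	/* old buffer is big enough, reuse */

	if (*vaddr)
		dma_free_coherent(dev, *alloc_size, *vaddr, *paddr);

	*vaddr = dma_alloc_coherent(dev, need, paddr, GFP_KERNEL);
	*alloc_size = *vaddr ? need : 0;

	return *vaddr;
}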
-static void ath12k_qmi_m3_free(struct ath12k_base *ab)
-{
- struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
-
- if (!m3_mem->vaddr)
- return;
-
- dma_free_coherent(ab->dev, m3_mem->size,
- m3_mem->vaddr, m3_mem->paddr);
- m3_mem->vaddr = NULL;
- m3_mem->size = 0;
-}
-
-static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
@@ -2966,6 +3240,8 @@ void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
{
int ret;
+ clear_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags);
+
ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan mode off\n");
@@ -3022,9 +3298,69 @@ ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi,
return 0;
}
-static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab)
{
- struct ath12k_base *ab = qmi->ab;
+ struct ath12k_qmi *qmi = &ab->qmi;
+
+ spin_lock(&qmi->event_lock);
+
+ if (ath12k_qmi_get_event_block(qmi))
+ ath12k_qmi_set_event_block(qmi, false);
+
+ spin_unlock(&qmi->event_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "trigger host cap for device id %d\n",
+ ab->device_id);
+
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL);
+}
+
+static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+
+ if (!(ab && ab->qmi.num_radios != U8_MAX))
+ return false;
+ }
+
+ return true;
+}
+
+static struct ath12k_base *ath12k_qmi_hw_group_find_blocked(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ spin_lock(&ab->qmi.event_lock);
+
+ if (ath12k_qmi_get_event_block(&ab->qmi)) {
+ spin_unlock(&ab->qmi.event_lock);
+ return ab;
+ }
+
+ spin_unlock(&ab->qmi.event_lock);
+ }
+
+ return NULL;
+}
+
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab, *block_ab;
+ struct ath12k_hw_group *ag = ab->ag;
int ret;
ath12k_qmi_phy_cap_send(ab);
@@ -3035,16 +3371,30 @@ static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
return ret;
}
- ret = ath12k_qmi_host_cap_send(ab);
- if (ret < 0) {
- ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
- return ret;
+ spin_lock(&qmi->event_lock);
+
+ ath12k_qmi_set_event_block(qmi, true);
+
+ spin_unlock(&qmi->event_lock);
+
+ mutex_lock(&ag->mutex);
+
+ if (ath12k_qmi_hw_group_host_cap_ready(ag)) {
+ ath12k_core_hw_group_set_mlo_capable(ag);
+
+ block_ab = ath12k_qmi_hw_group_find_blocked(ag);
+ if (block_ab)
+ ath12k_qmi_trigger_host_cap(block_ab);
}
+ mutex_unlock(&ag->mutex);
+
return ret;
}
-static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
@@ -3058,7 +3408,9 @@ static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
return ret;
}
-static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
@@ -3178,6 +3530,9 @@ static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
.decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
.fn = ath12k_qmi_msg_fw_ready_cb,
},
+
+ /* end of list */
+ {},
};
static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
@@ -3220,6 +3575,21 @@ static const struct qmi_ops ath12k_qmi_ops = {
.del_server = ath12k_qmi_ops_del_server,
};
+static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath12k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n",
+ ab->device_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
static void ath12k_qmi_driver_event_work(struct work_struct *work)
{
struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
@@ -3246,7 +3616,6 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH12K_QMI_EVENT_SERVER_EXIT:
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
- set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
break;
case ATH12K_QMI_EVENT_REQUEST_MEM:
ret = ath12k_qmi_event_mem_request(qmi);
@@ -3260,19 +3629,28 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH12K_QMI_EVENT_FW_READY:
clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
- ath12k_hal_dump_srng_stats(ab);
+ if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags)) {
+ if (ab->is_reset)
+ ath12k_hal_dump_srng_stats(ab);
+
+ set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
clear_bit(ATH12K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
- clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
- ath12k_core_qmi_firmware_ready(ab);
- set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+ ret = ath12k_core_qmi_firmware_ready(ab);
+ if (!ret)
+ set_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+ &ab->dev_flags);
break;
+ case ATH12K_QMI_EVENT_HOST_CAP:
+ ret = ath12k_qmi_event_host_cap(qmi);
+ if (ret < 0)
+ set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
default:
ath12k_warn(ab, "invalid event type: %d", event->type);
break;
@@ -3325,11 +3703,15 @@ int ath12k_qmi_init_service(struct ath12k_base *ab)
void ath12k_qmi_deinit_service(struct ath12k_base *ab)
{
+ if (!ab->qmi.ab)
+ return;
+
qmi_handle_release(&ab->qmi.handle);
cancel_work_sync(&ab->qmi.event_work);
destroy_workqueue(ab->qmi.event_wq);
ath12k_qmi_m3_free(ab);
ath12k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.ab = NULL;
}
void ath12k_qmi_free_resource(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 6ee33c9851c6..45d7c3fcafdd 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -68,6 +68,7 @@ enum ath12k_qmi_event_type {
ATH12K_QMI_EVENT_FORCE_FW_ASSERT,
ATH12K_QMI_EVENT_POWER_UP,
ATH12K_QMI_EVENT_POWER_DOWN,
+ ATH12K_QMI_EVENT_HOST_CAP,
ATH12K_QMI_EVENT_MAX,
};
@@ -96,6 +97,8 @@ struct ath12k_qmi_event_msg {
struct target_mem_chunk {
u32 size;
u32 type;
+ u32 prev_size;
+ u32 prev_type;
dma_addr_t paddr;
union {
void __iomem *ioaddr;
@@ -140,6 +143,10 @@ struct ath12k_qmi {
u32 target_mem_mode;
bool target_mem_delayed;
u8 cal_done;
+
+ /* protected with struct ath12k_qmi::event_lock */
+ bool block_event;
+
u8 num_radios;
struct target_info target;
struct m3_mem_region m3_mem;
@@ -165,6 +172,7 @@ enum ath12k_qmi_target_mem {
BDF_MEM_REGION_TYPE = 0x2,
M3_DUMP_REGION_TYPE = 0x3,
CALDB_MEM_REGION_TYPE = 0x4,
+ MLO_GLOBAL_MEM_REGION_TYPE = 0x8,
PAGEABLE_MEM_REGION_TYPE = 0x9,
};
@@ -265,6 +273,8 @@ struct qmi_wlanfw_phy_cap_resp_msg_v01 {
u8 num_phy;
u8 board_id_valid;
u32 board_id;
+ u8 single_chip_mlo_support_valid;
+ u8 single_chip_mlo_support;
};
#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
@@ -590,11 +600,26 @@ struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block)
+{
+ lockdep_assert_held(&qmi->event_lock);
+
+ qmi->block_event = block;
+}
+
+static inline bool ath12k_qmi_get_event_block(struct ath12k_qmi *qmi)
+{
+ lockdep_assert_held(&qmi->event_lock);
+
+ return qmi->block_event;
+}
+
int ath12k_qmi_firmware_start(struct ath12k_base *ab,
u32 mode);
void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
void ath12k_qmi_deinit_service(struct ath12k_base *ab);
int ath12k_qmi_init_service(struct ath12k_base *ab);
void ath12k_qmi_free_resource(struct ath12k_base *ab);
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab);
#endif
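
Editor's note: the two inline helpers above only assert that event_lock is held; callers are expected to take the spinlock themselves, as ath12k_qmi_trigger_host_cap() does in qmi.c. A minimal usage sketch:

static void ath12k_qmi_unblock_sketch(struct ath12k_qmi *qmi)
{
	spin_lock(&qmi->event_lock);

	if (ath12k_qmi_get_event_block(qmi))
		ath12k_qmi_set_event_block(qmi, false);

	spin_unlock(&qmi->event_lock);
}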
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index f308e9a6ed55..439d61f284d8 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -49,8 +49,8 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath12k_wmi_init_country_arg arg;
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
- int ret;
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret, i;
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
@@ -85,10 +85,16 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
arg.cc_info.alpha2[2] = 0;
- ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
- if (ret)
- ath12k_warn(ar->ab,
- "INIT Country code set to fw failed : %d\n", ret);
+ /* Allow fresh updates to wiphy regd */
+ ah->regd_updated = false;
+
+ /* Send the reg change request to all the radios */
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+ }
}
int ath12k_reg_update_chan_list(struct ath12k *ar)
@@ -200,12 +206,34 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ struct ieee80211_hw *hw = ah->hw;
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
+ int i;
ab = ar->ab;
+
+ /* If one of the radios within ah has already updated the regd for
+ * the wiphy, then avoid setting regd again
+ */
+ if (ah->regd_updated)
+ return 0;
+
+ /* Firmware provides reg rules which are similar for the 2 GHz and
+ * 5 GHz pdevs, but the 6 GHz pdev has a superset of all rules,
+ * including rules for all bands, so we prefer the 6 GHz pdev's rules
+ * for setting up the wiphy regd.
+ * If a 6 GHz pdev is part of the ath12k_hw, wait for the 6 GHz pdev;
+ * else pick the first pdev which calls this function and use its
+ * regd to update the global hw regd.
+ * The regd_updated flag set at the end will not allow any further
+ * updates.
+ */
+ if (ah->use_6ghz_regd && !ar->supports_6ghz)
+ return 0;
+
pdev_id = ar->pdev_idx;
spin_lock_bh(&ab->base_lock);
@@ -258,12 +286,20 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
if (ret)
goto err;
- if (ar->state == ATH12K_STATE_ON) {
+ if (ah->state != ATH12K_HW_STATE_ON)
+ goto skip;
+
+ ah->regd_updated = true;
+ /* Apply the new regd to all the radios; this is expected to happen
+ * only once, since we check ah->regd_updated above and allow the
+ * update only once.
+ */
+ for_each_ar(ah, ar, i) {
+ ab = ar->ab;
ret = ath12k_reg_update_chan_list(ar);
if (ret)
goto err;
}
-
+skip:
return 0;
err:
ath12k_warn(ab, "failed to perform regd update : %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
index a0db6702a189..10366bbe9999 100644
--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -684,18 +684,17 @@ enum rx_msdu_start_reception_type {
#define RX_MSDU_END_INFO5_SA_IDX_TIMEOUT BIT(0)
#define RX_MSDU_END_INFO5_DA_IDX_TIMEOUT BIT(1)
+#define RX_MSDU_END_INFO5_TO_DS BIT(2)
+#define RX_MSDU_END_INFO5_TID GENMASK(6, 3)
#define RX_MSDU_END_INFO5_SA_IS_VALID BIT(7)
#define RX_MSDU_END_INFO5_DA_IS_VALID BIT(8)
#define RX_MSDU_END_INFO5_DA_IS_MCBC BIT(9)
#define RX_MSDU_END_INFO5_L3_HDR_PADDING GENMASK(11, 10)
#define RX_MSDU_END_INFO5_FIRST_MSDU BIT(12)
#define RX_MSDU_END_INFO5_LAST_MSDU BIT(13)
+#define RX_MSDU_END_INFO5_FROM_DS BIT(14)
#define RX_MSDU_END_INFO5_IP_CHKSUM_FAIL_COPY BIT(15)
-#define RX_MSDU_END_QCN9274_INFO5_TO_DS BIT(2)
-#define RX_MSDU_END_QCN9274_INFO5_TID GENMASK(6, 3)
-#define RX_MSDU_END_QCN9274_INFO5_FROM_DS BIT(14)
-
#define RX_MSDU_END_INFO6_MSDU_DROP BIT(0)
#define RX_MSDU_END_INFO6_REO_DEST_IND GENMASK(5, 1)
#define RX_MSDU_END_INFO6_FLOW_IDX GENMASK(25, 6)
@@ -709,14 +708,14 @@ enum rx_msdu_start_reception_type {
#define RX_MSDU_END_INFO7_FLOW_AGGR_CONTN BIT(8)
#define RX_MSDU_END_INFO7_FISA_TIMEOUT BIT(9)
-#define RX_MSDU_END_QCN9274_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10)
-#define RX_MSDU_END_QCN9274_INFO7_MSDU_LIMIT_ERROR BIT(11)
-#define RX_MSDU_END_QCN9274_INFO7_FLOW_IDX_TIMEOUT BIT(12)
-#define RX_MSDU_END_QCN9274_INFO7_FLOW_IDX_INVALID BIT(13)
-#define RX_MSDU_END_QCN9274_INFO7_CCE_MATCH BIT(14)
-#define RX_MSDU_END_QCN9274_INFO7_AMSDU_PARSER_ERR BIT(15)
+#define RX_MSDU_END_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10)
+#define RX_MSDU_END_INFO7_MSDU_LIMIT_ERROR BIT(11)
+#define RX_MSDU_END_INFO7_FLOW_IDX_TIMEOUT BIT(12)
+#define RX_MSDU_END_INFO7_FLOW_IDX_INVALID BIT(13)
+#define RX_MSDU_END_INFO7_CCE_MATCH BIT(14)
+#define RX_MSDU_END_INFO7_AMSDU_PARSER_ERR BIT(15)
-#define RX_MSDU_END_QCN9274_INFO8_KEY_ID GENMASK(7, 0)
+#define RX_MSDU_END_INFO8_KEY_ID GENMASK(7, 0)
#define RX_MSDU_END_INFO9_SERVICE_CODE GENMASK(14, 6)
#define RX_MSDU_END_INFO9_PRIORITY_VALID BIT(15)
@@ -758,8 +757,8 @@ enum rx_msdu_start_reception_type {
#define RX_MSDU_END_INFO12_RECV_BW GENMASK(20, 18)
#define RX_MSDU_END_INFO12_RECEPTION_TYPE GENMASK(23, 21)
-#define RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP GENMASK(30, 24)
-#define RX_MSDU_END_QCN9274_INFO12_MIMO_DONE_COPY BIT(31)
+#define RX_MSDU_END_INFO12_MIMO_SS_BITMAP GENMASK(30, 24)
+#define RX_MSDU_END_INFO12_MIMO_DONE_COPY BIT(31)
#define RX_MSDU_END_INFO13_FIRST_MPDU BIT(0)
#define RX_MSDU_END_INFO13_MCAST_BCAST BIT(2)
@@ -791,7 +790,7 @@ enum rx_msdu_start_reception_type {
#define RX_MSDU_END_INFO13_UNDECRYPT_FRAME_ERR BIT(30)
#define RX_MSDU_END_INFO13_FCS_ERR BIT(31)
-#define RX_MSDU_END_QCN9274_INFO13_WIFI_PARSER_ERR BIT(15)
+#define RX_MSDU_END_INFO13_WIFI_PARSER_ERR BIT(15)
#define RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE GENMASK(12, 10)
#define RX_MSDU_END_INFO14_RX_BITMAP_NOT_UPDED BIT(13)
@@ -889,65 +888,6 @@ struct rx_msdu_end_qcn9274_compact {
__le32 info14;
} __packed;
-/* These macro definitions are only used for WCN7850 */
-#define RX_MSDU_END_WCN7850_INFO2_KEY_ID BIT(7, 0)
-
-#define RX_MSDU_END_WCN7850_INFO5_MSDU_LIMIT_ERR BIT(2)
-#define RX_MSDU_END_WCN7850_INFO5_IDX_TIMEOUT BIT(3)
-#define RX_MSDU_END_WCN7850_INFO5_IDX_INVALID BIT(4)
-#define RX_MSDU_END_WCN7850_INFO5_WIFI_PARSE_ERR BIT(5)
-#define RX_MSDU_END_WCN7850_INFO5_AMSDU_PARSER_ERR BIT(6)
-#define RX_MSDU_END_WCN7850_INFO5_TCPUDP_CSUM_FAIL_CPY BIT(14)
-
-#define RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP GENMASK(31, 24)
-
-#define RX_MSDU_END_WCN7850_INFO13_FRAGMENT_FLAG BIT(13)
-#define RX_MSDU_END_WCN7850_INFO13_CCE_MATCH BIT(15)
-
-struct rx_msdu_end_wcn7850 {
- __le16 info0;
- __le16 phy_ppdu_id;
- __le16 ip_hdr_cksum;
- __le16 info1;
- __le16 info2;
- __le16 cumulative_l3_checksum;
- __le32 rule_indication0;
- __le32 rule_indication1;
- __le16 info3;
- __le16 l3_type;
- __le32 ipv6_options_crc;
- __le32 tcp_seq_num;
- __le32 tcp_ack_num;
- __le16 info4;
- __le16 window_size;
- __le16 tcp_udp_chksum;
- __le16 info5;
- __le16 sa_idx;
- __le16 da_idx_or_sw_peer_id;
- __le32 info6;
- __le32 fse_metadata;
- __le16 cce_metadata;
- __le16 sa_sw_peer_id;
- __le16 info7;
- __le16 rsvd0;
- __le16 cumulative_l4_checksum;
- __le16 cumulative_ip_length;
- __le32 info9;
- __le32 info10;
- __le32 info11;
- __le32 toeplitz_hash_2_or_4;
- __le32 flow_id_toeplitz;
- __le32 info12;
- __le32 ppdu_start_timestamp_31_0;
- __le32 ppdu_start_timestamp_63_32;
- __le32 phy_meta_data;
- __le16 vlan_ctag_ci;
- __le16 vlan_stag_ci;
- __le32 rsvd[3];
- __le32 info13;
- __le32 info14;
-} __packed;
-
/* rx_msdu_end
*
* rxpcu_mpdu_filter_in_category
@@ -1578,7 +1518,7 @@ struct rx_pkt_hdr_tlv {
struct hal_rx_desc_wcn7850 {
__le64 msdu_end_tag;
- struct rx_msdu_end_wcn7850 msdu_end;
+ struct rx_msdu_end_qcn9274 msdu_end;
u8 rx_padding0[RX_BE_PADDING0_BYTES];
__le64 mpdu_start_tag;
struct rx_mpdu_start_qcn9274 mpdu_start;
diff --git a/drivers/net/wireless/ath/ath12k/trace.h b/drivers/net/wireless/ath/ath12k/trace.h
index 240737e1542d..253c67accb0e 100644
--- a/drivers/net/wireless/ath/ath12k/trace.h
+++ b/drivers/net/wireless/ath/ath12k/trace.h
@@ -36,8 +36,8 @@ TRACE_EVENT(ath12k_htt_pktlog,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->ab->dev));
- __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->buf_len = buf_len;
__entry->pktlog_checksum = pktlog_checksum;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
@@ -73,8 +73,8 @@ TRACE_EVENT(ath12k_htt_ppdu_stats,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->ab->dev));
- __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len;
__entry->info = ar->pdev->timestamp.info;
__entry->sync_tstmp_lo_us = ar->pdev->timestamp.sync_timestamp_hi_us;
@@ -117,8 +117,8 @@ TRACE_EVENT(ath12k_htt_rxdesc,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->ab->dev));
- __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len;
__entry->type = type;
__entry->info = ar->pdev->timestamp.info;
@@ -153,8 +153,8 @@ TRACE_EVENT(ath12k_wmi_diag,
),
TP_fast_assign(
- __assign_str(device, dev_name(ab->dev));
- __assign_str(driver, dev_driver_string(ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 9d69a1769926..abb510d235a5 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -228,9 +228,12 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+ config->ema_max_vap_cnt = ab->num_radios;
+ config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+ config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
- config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
+ config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -497,6 +500,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
mac_caps = wmi_mac_phy_caps + phy_idx;
pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
+ pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
@@ -817,6 +821,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
struct wmi_vdev_create_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
+ bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
+ struct wmi_vdev_create_mlo_params *ml_params;
struct wmi_tlv *tlv;
int ret, len;
void *ptr;
@@ -826,7 +832,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
* both the bands.
*/
len = sizeof(*cmd) + TLV_HDR_SIZE +
- (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
+ (is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -841,6 +848,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
cmd->vdev_subtype = cpu_to_le32(args->subtype);
cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
cmd->pdev_id = cpu_to_le32(args->pdev_id);
+ cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
+ cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
@@ -858,20 +867,35 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
len = sizeof(*txrx_streams);
txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
len);
- txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
txrx_streams->supported_tx_streams =
- args->chains[NL80211_BAND_2GHZ].tx;
+ cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
txrx_streams->supported_rx_streams =
- args->chains[NL80211_BAND_2GHZ].rx;
+ cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);
txrx_streams++;
txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
len);
- txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
txrx_streams->supported_tx_streams =
- args->chains[NL80211_BAND_5GHZ].tx;
+ cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
txrx_streams->supported_rx_streams =
- args->chains[NL80211_BAND_5GHZ].rx;
+ cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
+
+ ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ if (is_ml_vdev) {
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+ ml_params = ptr;
+
+ ml_params->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
+ sizeof(*ml_params));
+ ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
+ }
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
@@ -1014,19 +1038,27 @@ static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart)
{
+ struct wmi_vdev_start_mlo_params *ml_params;
+ struct wmi_partner_link_info *partner_info;
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_channel_params *chan;
struct wmi_tlv *tlv;
void *ptr;
- int ret, len;
+ int ret, len, i, ml_arg_size = 0;
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+ if (!restart && arg->ml.enabled) {
+ ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
+ TLV_HDR_SIZE + (arg->ml.num_partner_links *
+ sizeof(*partner_info));
+ len += ml_arg_size;
+ }
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@@ -1046,6 +1078,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
+ cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);
if (!restart) {
if (arg->ssid) {
@@ -1078,6 +1111,61 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
ptr += sizeof(*tlv);
+ if (ml_arg_size) {
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+
+ ml_params = ptr;
+
+ ml_params->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
+ sizeof(*ml_params));
+
+ ml_params->flags = le32_encode_bits(arg->ml.enabled,
+ ATH12K_WMI_FLAG_MLO_ENABLED) |
+ le32_encode_bits(arg->ml.assoc_link,
+ ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
+ le32_encode_bits(arg->ml.mcast_link,
+ ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
+ le32_encode_bits(arg->ml.link_add,
+ ATH12K_WMI_FLAG_MLO_LINK_ADD);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
+ arg->vdev_id, ml_params->flags);
+
+ ptr += sizeof(*ml_params);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ arg->ml.num_partner_links *
+ sizeof(*partner_info));
+ ptr += TLV_HDR_SIZE;
+
+ partner_info = ptr;
+
+ for (i = 0; i < arg->ml.num_partner_links; i++) {
+ partner_info->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+ sizeof(*partner_info));
+ partner_info->vdev_id =
+ cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+ partner_info->hw_link_id =
+ cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+ ether_addr_copy(partner_info->vdev_addr.addr,
+ arg->ml.partner_info[i].addr);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n",
+ partner_info->vdev_id, partner_info->hw_link_id,
+ partner_info->vdev_addr.addr);
+
+ partner_info++;
+ }
+
+ ptr = partner_info;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
restart ? "restart" : "start", arg->vdev_id,
arg->freq, arg->mode);
@@ -1097,7 +1185,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
return ret;
}
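
Editor's note: the (re)start command above grows by optional TLVs; every optional element is counted into len before the skb allocation and later written by advancing ptr. The resulting layout for an MLO start:

/* [ wmi_vdev_start_request_cmd          ]
 * [ ath12k_wmi_channel_params           ]
 * [ empty array TLV header              ]
 * [ TLV hdr + wmi_vdev_start_mlo_params ]  only if !restart && ml.enabled
 * [ TLV hdr + N * wmi_partner_link_info ]  N = arg->ml.num_partner_links
 */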
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
@@ -1112,14 +1200,20 @@ int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
sizeof(*cmd));
- cmd->vdev_id = cpu_to_le32(vdev_id);
- cmd->vdev_assoc_id = cpu_to_le32(aid);
+ cmd->vdev_id = cpu_to_le32(params->vdev_id);
+ cmd->vdev_assoc_id = cpu_to_le32(params->aid);
+
+ ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
- ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+ if (params->tx_bssid) {
+ ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
+ cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
+ cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
+ }
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
- vdev_id, aid, bssid);
+ params->vdev_id, params->aid, params->bssid);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
@@ -1136,9 +1230,14 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
- int ret;
+ int ret, len;
+ struct wmi_peer_create_mlo_params *ml_param;
+ void *ptr;
+ struct wmi_tlv *tlv;
- skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@@ -1150,9 +1249,23 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
cmd->peer_type = cpu_to_le32(arg->peer_type);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ptr = skb->data + sizeof(*cmd);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_param));
+ ptr += TLV_HDR_SIZE;
+ ml_param = ptr;
+ ml_param->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
+ sizeof(*ml_param));
+ if (arg->ml_enabled)
+ ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ ptr += sizeof(*ml_param);
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
- "WMI peer create vdev_id %d peer_addr %pM\n",
- arg->vdev_id, arg->peer_addr);
+ "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
+ arg->vdev_id, arg->peer_addr, ml_param->flags);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
if (ret) {
@@ -1525,6 +1638,7 @@ int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
sizeof(*cmd));
cmd->req_type = cpu_to_le32(type);
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI bss chan info req type %d\n", type);
@@ -1776,13 +1890,15 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
- struct sk_buff *bcn)
+ struct sk_buff *bcn,
+ struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_bcn_tmpl_cmd *cmd;
struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
+ u32 ema_params = 0;
void *ptr;
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
@@ -1801,6 +1917,16 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
cmd->buf_len = cpu_to_le32(bcn->len);
+ cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
+ if (ema_args) {
+ u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
+ u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
+ if (ema_args->bcn_index == 0)
+ u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
+ if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
+ u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
+ cmd->ema_params = cpu_to_le32(ema_params);
+ }
ptr = skb->data + sizeof(*cmd);
@@ -1900,7 +2026,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
if (arg->bw_160)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
if (arg->bw_320)
- cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
+ cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
/* Typically if STBC is enabled for VHT it should be enabled
* for HT as well
@@ -1975,12 +2101,15 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
struct ath12k_wmi_vht_rate_set_params *mcs;
struct ath12k_wmi_he_rate_set_params *he_mcs;
struct ath12k_wmi_eht_rate_set_params *eht_mcs;
+ struct wmi_peer_assoc_mlo_params *ml_params;
+ struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 peer_legacy_rates_align;
u32 peer_ht_rates_align;
int i, ret, len;
+ __le32 v;
peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
sizeof(u32));
@@ -1992,8 +2121,13 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
sizeof(*mcs) + TLV_HDR_SIZE +
(sizeof(*he_mcs) * arg->peer_he_mcs_count) +
- TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
- TLV_HDR_SIZE + TLV_HDR_SIZE;
+ TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
+
+ if (arg->ml.enabled)
+ len += TLV_HDR_SIZE + sizeof(*ml_params) +
+ TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
+ else
+ len += (2 * TLV_HDR_SIZE);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -2117,12 +2251,38 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*he_mcs);
}
- /* MLO header tag with 0 length */
- len = 0;
tlv = ptr;
+ len = arg->ml.enabled ? sizeof(*ml_params) : 0;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
+ if (!len)
+ goto skip_ml_params;
+
+ ml_params = ptr;
+ ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
+ len);
+ ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ if (arg->ml.assoc_link)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+ if (arg->ml.primary_umac)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+ if (arg->ml.logical_link_idx_valid)
+ ml_params->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
+
+ if (arg->ml.peer_id_valid)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
+
+ ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
+ ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
+ ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
+ ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
+ ptr += sizeof(*ml_params);
+skip_ml_params:
/* Loop through the EHT rate set */
len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
tlv = ptr;
@@ -2139,12 +2299,45 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*eht_mcs);
}
- /* ML partner links tag with 0 length */
- len = 0;
tlv = ptr;
+ len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
+ /* fill ML Partner links */
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
+ if (len == 0)
+ goto send;
+
+ for (i = 0; i < arg->ml.num_partner_links; i++) {
+ u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
+
+ partner_info = ptr;
+ partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd,
+ sizeof(*partner_info));
+ partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+ partner_info->hw_link_id =
+ cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+ partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ if (arg->ml.partner_info[i].assoc_link)
+ partner_info->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+ if (arg->ml.partner_info[i].primary_umac)
+ partner_info->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+ if (arg->ml.partner_info[i].logical_link_idx_valid) {
+ v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
+ partner_info->flags |= v;
+ }
+
+ partner_info->logical_link_idx =
+ cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
+ ptr += sizeof(*partner_info);
+ }
+
+send:
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
@@ -2723,6 +2916,149 @@ int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
return ret;
}
+int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
+ const u8 *buf, size_t buf_len)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_interface_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *ptr;
+ u32 len, len_aligned;
+ int ret;
+
+ len_aligned = roundup(buf_len, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->param_type_id = cpu_to_le32(param_id);
+ cmd->length = cpu_to_le32(buf_len);
+
+ ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
+ ptr += TLV_HDR_SIZE;
+ memcpy(ptr, buf, buf_len);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
+ param_id, ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
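
Editor's note: a hypothetical caller for the generic BIOS interface command above; the ACPI glue that selects the parameter id is not part of this hunk, so the id below is a placeholder only:

static int ath12k_push_bios_table_sketch(struct ath12k_base *ab,
					 const u8 *table, size_t table_len)
{
	u32 param_id = 0;	/* placeholder: use the firmware-defined id */

	return ath12k_wmi_set_bios_cmd(ab, param_id, table, table_len);
}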
+
+int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_sar_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret;
+ u8 *buf_ptr;
+ u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
+ const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
+ const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
+
+ sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
+ sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
+ sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
+ TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
+ cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
+ sar_table_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
+
+ buf_ptr += sar_table_len_aligned;
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
+ sar_dbs_backoff_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+			    "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_geo_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret;
+ u8 *buf_ptr;
+ u32 len, sar_geo_len_aligned;
+ const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
+
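+	/* Geo offset data travels as one byte-array TLV, u32 aligned. */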
+ sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason)
{
@@ -3324,16 +3660,19 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
- wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
+ wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
+ WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
- wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver,
+ wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
-
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+ wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
+ wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
+ wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
@@ -3546,6 +3885,7 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.res_cfg.is_reg_cc_ext_event_supported = true;
ab->hw_params->wmi_init(ab, &arg.res_cfg);
+ ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
arg.num_mem_chunks = wmi_ab->num_mem_chunks;
arg.hw_mode_id = wmi_ab->preferred_hw_mode;
@@ -3557,6 +3897,8 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.num_band_to_mac = ab->num_radios;
ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
+ ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
+
return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}
@@ -3664,7 +4006,7 @@ int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
sizeof(*cmd));
- cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
cmd->module_id = cpu_to_le32(arg->module_id);
cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
@@ -4041,6 +4383,7 @@ static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
kfree(ab->db_caps);
ab->db_caps = NULL;
+ ab->num_db_cap = 0;
}
static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
@@ -4319,6 +4662,9 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
caps->eht_cap_info_internal);
}
+ pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
+ pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
+
return 0;
}
@@ -4505,6 +4851,22 @@ static struct ath12k_reg_rule
return reg_rule_ptr;
}
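+/* Count the rules in the 5 GHz list whose start frequency falls in the
+ * 6 GHz range; see the caller below for why such rules are skipped.
+ */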
+static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
+ u32 num_reg_rules)
+{
+ u8 num_invalid_5ghz_rules = 0;
+ u32 count, start_freq;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
+
+ if (start_freq >= ATH12K_MIN_6G_FREQ)
+ num_invalid_5ghz_rules++;
+ }
+
+ return num_invalid_5ghz_rules;
+}
+
static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
struct sk_buff *skb,
struct ath12k_reg_info *reg_info)
@@ -4515,6 +4877,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
u32 num_2g_reg_rules, num_5g_reg_rules;
u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u8 num_invalid_5ghz_ext_rules;
u32 total_reg_rules = 0;
int ret, i, j;
@@ -4608,20 +4971,6 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
- /* FIXME: Currently FW includes 6G reg rule also in 5G rule
- * list for country US.
- * Having same 6G reg rule in 5G and 6G rules list causes
- * intersect check to be true, and same rules will be shown
- * multiple times in iw cmd. So added hack below to avoid
- * parsing 6G rule from 5G reg rule list, and this can be
- * removed later, after FW updates to remove 6G reg rule
- * from 5G rules list.
- */
- if (memcmp(reg_info->alpha2, "US", 2) == 0) {
- reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
- num_5g_reg_rules = reg_info->num_5g_reg_rules;
- }
-
reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
reg_info->num_phy = le32_to_cpu(ev->num_phy);
@@ -4724,8 +5073,29 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
+ ext_wmi_reg_rule += num_2g_reg_rules;
+
+	/* For a few countries the firmware might include a 6 GHz reg rule in
+	 * the 5 GHz rule list along with the separate 6 GHz rules. Having the
+	 * same 6 GHz rule in both lists makes the intersect check true, so
+	 * the same rules would show up multiple times in the iw output.
+	 * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
+	 */
+ num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
+ num_5g_reg_rules);
+
+ if (num_invalid_5ghz_ext_rules) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+			   "CC: %s 5 GHz reg rules from fw %d, invalid 5 GHz rules %d\n",
+ reg_info->alpha2, reg_info->num_5g_reg_rules,
+ num_invalid_5ghz_ext_rules);
+
+ num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
+ reg_info->num_5g_reg_rules = num_5g_reg_rules;
+ }
+
if (num_5g_reg_rules) {
- ext_wmi_reg_rule += num_2g_reg_rules;
reg_info->reg_rules_5g_ptr =
create_ext_reg_rules_from_wmi(num_5g_reg_rules,
ext_wmi_reg_rule);
@@ -4737,7 +5107,12 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
- ext_wmi_reg_rule += num_5g_reg_rules;
+	/* The number of 5 GHz reg rules was adjusted above, but the pointer
+	 * into ext_wmi_reg_rule must still skip those invalid entries.
+	 *
+	 * NOTE: num_invalid_5ghz_ext_rules is 0 in all other cases.
+	 */
+ ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
reg_info->reg_rules_6g_ap_ptr[i] =
@@ -5016,6 +5391,9 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
info->flags |= IEEE80211_TX_STAT_ACK;
+ if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -5548,7 +5926,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
* event. Otherwise, it goes to fallback.
*/
if (ab->hw_params->single_pdev_only &&
- pdev_idx < ab->hw_params->num_rxmda_per_pdev)
+ pdev_idx < ab->hw_params->num_rxdma_per_pdev)
goto mem_free;
else
goto fallback;
@@ -5866,7 +6244,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
goto exit;
}
- if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
+ if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
(rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
WMI_RX_STATUS_ERR_CRC))) {
@@ -5877,8 +6255,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
+ if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
+ rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
+ status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
@@ -5899,8 +6279,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
sband = &ar->mac.sbands[status->band];
- status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
- status->band);
+ if (status->band != NL80211_BAND_6GHZ)
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+
status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
@@ -5927,13 +6309,11 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
}
}
- /* TODO: Pending handle beacon implementation
- *if (ieee80211_is_beacon(hdr->frame_control))
- * ath12k_mac_handle_beacon(ar, skb);
- */
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath12k_mac_handle_beacon(ar, skb);
ath12k_dbg(ab, ATH12K_DBG_MGMT,
- "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
@@ -5993,7 +6373,8 @@ static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
spin_lock_bh(&ar->data_lock);
if (ar->scan.state == state &&
- ar->scan.vdev_id == vdev_id) {
+ ar->scan.arvif &&
+ ar->scan.arvif->vdev_id == vdev_id) {
spin_unlock_bh(&ar->data_lock);
return ar;
}
@@ -6137,42 +6518,44 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_roam_event roam_ev = {};
struct ath12k *ar;
+ u32 vdev_id;
+ u8 roam_reason;
if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
ath12k_warn(ab, "failed to extract roam event");
return;
}
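+	/* The reason field packs the roam reason in bits 0-3 and the subnet
+	 * status in bits 4-5; only the reason is of interest here.
+	 */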
+ vdev_id = le32_to_cpu(roam_ev.vdev_id);
+ roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
+ WMI_ROAM_REASON_MASK);
+
ath12k_dbg(ab, ATH12K_DBG_WMI,
- "wmi roam event vdev %u reason 0x%08x rssi %d\n",
- roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+ "wmi roam event vdev %u reason %d rssi %d\n",
+ vdev_id, roam_reason, roam_ev.rssi);
rcu_read_lock();
- ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id));
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
- ath12k_warn(ab, "invalid vdev id in roam ev %d",
- roam_ev.vdev_id);
+ ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
rcu_read_unlock();
return;
}
- if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX)
+ if (roam_reason >= WMI_ROAM_REASON_MAX)
ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
- roam_ev.reason, roam_ev.vdev_id);
+ roam_reason, vdev_id);
- switch (le32_to_cpu(roam_ev.reason)) {
+ switch (roam_reason) {
case WMI_ROAM_REASON_BEACON_MISS:
- /* TODO: Pending beacon miss and connection_loss_work
- * implementation
- * ath12k_mac_handle_beacon_miss(ar, vdev_id);
- */
+ ath12k_mac_handle_beacon_miss(ar, vdev_id);
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
case WMI_ROAM_REASON_HO_FAILED:
ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
- roam_ev.reason, roam_ev.vdev_id);
+ roam_reason, vdev_id);
break;
}
@@ -6507,7 +6890,9 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
const u32 *vdev_ids)
{
int i;
- struct ath12k_vif *arvif;
+ struct ieee80211_bss_conf *conf;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_vif *ahvif;
/* Finish CSA once the switch count becomes NULL */
if (ev->current_switch_count)
@@ -6522,9 +6907,23 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
vdev_ids[i]);
continue;
}
+ ahvif = arvif->ahvif;
+
+ if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+			ath12k_warn(ab, "invalid CSA switch count event link id: %d\n",
+ arvif->link_id);
+ continue;
+ }
- if (arvif->is_up && arvif->vif->bss_conf.csa_active)
- ieee80211_csa_finish(arvif->vif, 0);
+ conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+ if (!conf) {
+ ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ continue;
+ }
+
+ if (arvif->is_up && conf->csa_active)
+ ieee80211_csa_finish(ahvif->vif, 0);
}
rcu_read_unlock();
}
@@ -6568,6 +6967,7 @@ static void
ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
{
const void **tb;
+ struct ath12k_mac_get_any_chanctx_conf_arg arg;
const struct ath12k_wmi_pdev_radar_event *ev;
struct ath12k *ar;
int ret;
@@ -6603,13 +7003,22 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
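+	/* ieee80211_radar_detected() now takes the chanctx the radar was seen
+	 * on; pick any active chanctx on this radio to report against.
+	 */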
+ arg.ar = ar;
+ arg.chanctx_conf = NULL;
+ ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
+ ath12k_mac_get_any_chanctx_conf_iter, &arg);
+ if (!arg.chanctx_conf) {
+ ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
+ goto exit;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
ev->pdev_id);
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ath12k_ar_to_hw(ar));
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
exit:
rcu_read_unlock();
@@ -6854,6 +7263,189 @@ exit:
kfree(tb);
}
+static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_wow_ev_pg_fault_param *pf_param;
+ const struct wmi_wow_ev_param *param;
+ struct wmi_wow_ev_arg *arg = data;
+ int pf_len;
+
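+	/* The event carries a WOW_EVENT_INFO TLV with the wake reason and,
+	 * for page-fault wakeups, an ARRAY_BYTE TLV with the fault data.
+	 */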
+ switch (tag) {
+ case WMI_TAG_WOW_EVENT_INFO:
+ param = ptr;
+ arg->wake_reason = le32_to_cpu(param->wake_reason);
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
+ arg->wake_reason, wow_reason(arg->wake_reason));
+ break;
+
+ case WMI_TAG_ARRAY_BYTE:
+ if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
+ pf_param = ptr;
+ pf_len = le32_to_cpu(pf_param->len);
+ if (pf_len > len - sizeof(pf_len) ||
+ pf_len < 0) {
+				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
+ pf_len);
+ return -EINVAL;
+ }
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
+ pf_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
+ "wow_reason_page_fault packet present",
+ "wow_pg_fault ",
+ pf_param->data,
+ pf_len);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_wow_ev_arg arg = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_wow_wakeup_host_parse,
+ &arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ complete(&ab->wow.wakeup_completed);
+}
+
+static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_gtk_offload_status_event *ev;
+ struct ath12k_link_vif *arvif;
+ __be64 replay_ctr_be;
+ u64 replay_ctr;
+ const void **tb;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch gtk offload status ev");
+ kfree(tb);
+ return;
+ }
+
+ rcu_read_lock();
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
+ if (!arvif) {
+ rcu_read_unlock();
+ ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ le32_to_cpu(ev->vdev_id));
+ kfree(tb);
+ return;
+ }
+
+ replay_ctr = le64_to_cpu(ev->replay_ctr);
+ arvif->rekey_data.replay_ctr = replay_ctr;
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
+ le32_to_cpu(ev->refresh_cnt), replay_ctr);
+
+ /* supplicant expects big-endian replay counter */
+ replay_ctr_be = cpu_to_be64(replay_ctr);
+
+ ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
+
+ rcu_read_unlock();
+
+ kfree(tb);
+}
+
+static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_mlo_setup_complete_event *ev;
+ struct ath12k *ar = NULL;
+ struct ath12k_pdev *pdev;
+ const void **tb;
+ int ret, i;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
+ kfree(tb);
+ return;
+ }
+
+ if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
+ goto skip_lookup;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
+ ar = pdev->ar;
+ break;
+ }
+ }
+
+skip_lookup:
+ if (!ar) {
+		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
+			    le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->status));
+ goto out;
+ }
+
+ ar->mlo_setup_status = le32_to_cpu(ev->status);
+ complete(&ar->mlo_setup_done);
+
+out:
+ kfree(tb);
+}
+
+static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_mlo_teardown_complete_event *ev;
+ const void **tb;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch teardown complete event\n");
+ kfree(tb);
+ return;
+ }
+
+ kfree(tb);
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -6958,13 +7550,6 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_P2P_NOA_EVENTID:
ath12k_wmi_p2p_noa_event(ab, skb);
break;
- /* add Unsupported events here */
- case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
- case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
- case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
- ath12k_dbg(ab, ATH12K_DBG_WMI,
- "ignoring unsupported event 0x%x\n", id);
- break;
case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
break;
@@ -6974,6 +7559,31 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_DIAG_EVENTID:
ath12k_wmi_diag_event(ab, skb);
break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath12k_wmi_event_wow_wakeup_host(ab, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath12k_wmi_gtk_offload_status_event(ab, skb);
+ break;
+ case WMI_MLO_SETUP_COMPLETE_EVENTID:
+ ath12k_wmi_event_mlo_setup_complete(ab, skb);
+ break;
+ case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
+ ath12k_wmi_event_teardown_complete(ab, skb);
+ break;
+ /* add Unsupported events (rare) here */
+ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+ case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+ case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "ignoring unsupported event 0x%x\n", id);
+ break;
+ /* add Unsupported events (frequent) here */
+ case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
+ case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
+ case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+ /* debug might flood hence silently ignore (no-op) */
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -6988,9 +7598,11 @@ static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
u32 pdev_idx)
{
int status;
- u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
- ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
- ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
+ static const u32 svc_id[] = {
+ ATH12K_HTC_SVC_ID_WMI_CONTROL,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
+ };
struct ath12k_htc_svc_conn_req conn_req = {};
struct ath12k_htc_svc_conn_resp conn_resp = {};
@@ -7076,13 +7688,13 @@ ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
int ath12k_wmi_simulate_radar(struct ath12k *ar)
{
- struct ath12k_vif *arvif;
+ struct ath12k_link_vif *arvif;
u32 dfs_args[DFS_MAX_TEST_ARGS];
struct wmi_unit_test_cmd wmi_ut;
bool arvif_found = false;
list_for_each_entry(arvif, &ar->arvifs, list) {
- if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
arvif_found = true;
break;
}
@@ -7183,3 +7795,709 @@ void ath12k_wmi_detach(struct ath12k_base *ab)
ath12k_wmi_free_dbring_caps(ab);
}
+
+int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
+{
+ struct wmi_hw_data_filter_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
+
+ /* Set all modes in case of disable */
+ if (arg->enable)
+ cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
+ else
+ cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi hw data filter enable %d filter_bitmap 0x%x\n",
+ arg->enable, arg->hw_filter_bitmap);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
+int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
+{
+ struct wmi_wow_host_wakeup_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ sizeof(*cmd));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
+}
+
+int ath12k_wmi_wow_enable(struct ath12k *ar)
+{
+ struct wmi_wow_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_enable_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
+ sizeof(*cmd));
+
+ cmd->enable = cpu_to_le32(1);
+ cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
+}
+
+int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_wow_add_del_event_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->is_add = cpu_to_le32(enable);
+ cmd->event_bitmap = cpu_to_le32((1 << event));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
+int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct wmi_wow_add_pattern_cmd *cmd;
+ struct wmi_wow_bitmap_pattern_params *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
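+	/* The TLV sequence is fixed: the unused pattern types are still
+	 * emitted below as empty placeholder TLVs.
+	 */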
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* cmd */
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pattern_id = cpu_to_le32(pattern_id);
+ cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ bitmap = ptr;
+ bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
+ sizeof(*bitmap));
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+ bitmap->pattern_offset = cpu_to_le32(pattern_offset);
+ bitmap->pattern_len = cpu_to_le32(pattern_len);
+ bitmap->bitmask_len = cpu_to_le32(pattern_len);
+ bitmap->pattern_id = cpu_to_le32(pattern_id);
+
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
+ vdev_id, pattern_id, pattern_offset, pattern_len);
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
+ bitmap->patternbuf, pattern_len);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
+ bitmap->bitmaskbuf, pattern_len);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
+
+int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct wmi_wow_del_pattern_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pattern_id = cpu_to_le32(pattern_id);
+ cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
+
+static struct sk_buff *
+ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno)
+{
+ struct nlo_configured_params *nlo_list;
+ size_t len, nlo_list_len, channel_list_len;
+ struct wmi_wow_nlo_config_cmd *cmd;
+ __le32 *channel_list;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ u32 i;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+	      /* TLV placeholder for array of structures
+	       * nlo_configured_params(nlo_list)
+	       */
+	      sizeof(*tlv);
+	      /* TLV placeholder for array of uint32 channel_list */
+
+ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+ len += channel_list_len;
+
+ nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+ len += nlo_list_len;
+
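+	/* Overall layout: fixed cmd TLV, a struct-array TLV of nlo entries,
+	 * then a uint32-array TLV with the channel list of the first network.
+	 */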
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(pno->vdev_id);
+ cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
+ cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
+
+ if (pno->do_passive_scan)
+ cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
+
+ cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
+ cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
+ cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
+ cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ }
+
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_params(nlo_list) */
+ cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = ptr;
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
+ sizeof(*nlo_list));
+
+ nlo_list[i].ssid.valid = cpu_to_le32(1);
+ nlo_list[i].ssid.ssid.ssid_len =
+ cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
+
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
+ nlo_list[i].rssi_cond.rssi =
+ cpu_to_le32(pno->a_networks[i].rssi_threshold);
+ }
+
+ nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ cpu_to_le32(pno->a_networks[i].bcast_nw_type);
+ }
+
+ ptr += nlo_list_len;
+ cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
+ ptr += sizeof(*tlv);
+ channel_list = ptr;
+
+ for (i = 0; i < pno->a_networks[0].channel_count; i++)
+ channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
+ u32 vdev_id)
+{
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno_scan)
+{
+ struct sk_buff *skb;
+
+ if (pno_scan->enable)
+ skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+ if (IS_ERR_OR_NULL(skb))
+ return -ENOMEM;
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
+
+static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
+ struct wmi_arp_ns_offload_arg *offload,
+ void **ptr,
+ bool enable,
+ bool ext)
+{
+ struct wmi_ns_offload_params *ns;
+ struct wmi_tlv *tlv;
+ void *buf_ptr = *ptr;
+ u32 ns_cnt, ns_ext_tuples;
+ int i, max_offloads;
+
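+	/* The base TLV holds at most WMI_MAX_NS_OFFLOADS tuples; when more
+	 * addresses are configured, the remainder goes into a second,
+	 * extended TLV (ext == true).
+	 */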
+ ns_cnt = offload->ipv6_count;
+
+ tlv = buf_ptr;
+
+ if (ext) {
+ ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ ns_ext_tuples * sizeof(*ns));
+ i = WMI_MAX_NS_OFFLOADS;
+ max_offloads = offload->ipv6_count;
+ } else {
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+ i = 0;
+ max_offloads = WMI_MAX_NS_OFFLOADS;
+ }
+
+ buf_ptr += sizeof(*tlv);
+
+ for (; i < max_offloads; i++) {
+ ns = buf_ptr;
+ ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
+ sizeof(*ns));
+
+ if (enable) {
+ if (i < ns_cnt)
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
+
+ memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+ memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+
+ if (offload->ipv6_type[i])
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
+
+ memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+
+ if (!is_zero_ether_addr(ns->target_mac.addr))
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi index %d ns_solicited %pI6 target %pI6",
+ i, ns->solicitation_ipaddr,
+ ns->target_ipaddr[0]);
+ }
+
+ buf_ptr += sizeof(*ns);
+ }
+
+ *ptr = buf_ptr;
+}
+
+static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
+ struct wmi_arp_ns_offload_arg *offload,
+ void **ptr,
+ bool enable)
+{
+ struct wmi_arp_offload_params *arp;
+ struct wmi_tlv *tlv;
+ void *buf_ptr = *ptr;
+ int i;
+
+ /* fill arp tuple */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+ buf_ptr += sizeof(*tlv);
+
+ for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ arp = buf_ptr;
+ arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
+ sizeof(*arp));
+
+ if (enable && i < offload->ipv4_count) {
+ /* Copy the target ip addr and flags */
+ arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
+ memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
+ arp->target_ipaddr);
+ }
+
+ buf_ptr += sizeof(*arp);
+ }
+
+ *ptr = buf_ptr;
+}
+
+int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload,
+ bool enable)
+{
+ struct wmi_set_arp_ns_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *buf_ptr;
+ size_t len;
+ u8 ns_cnt, ns_ext_tuples = 0;
+
+ ns_cnt = offload->ipv6_count;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
+ sizeof(*tlv) +
+ WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
+
+ if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+ ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+ len += sizeof(*tlv) +
+ ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
+ }
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ buf_ptr = skb->data;
+ cmd = buf_ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ sizeof(*cmd));
+ cmd->flags = cpu_to_le32(0);
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+ cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
+
+ buf_ptr += sizeof(*cmd);
+
+	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
+ ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+ if (ns_ext_tuples)
+		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
+ struct ath12k_link_vif *arvif, bool enable)
+{
+ struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct sk_buff *skb;
+ __le64 replay_ctr;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+
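+	/* On enable, firmware needs the supplicant's KCK/KEK and the current
+	 * replay counter so it can handle GTK rekeying on its own.
+	 */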
+ if (enable) {
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
+
+ /* the length in rekey_data and cmd is equal */
+ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+ memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+
+ replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+ memcpy(cmd->replay_ctr, &replay_ctr,
+ sizeof(replay_ctr));
+ } else {
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+ arvif->vdev_id, enable);
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
+ struct ath12k_link_vif *arvif)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+ arvif->vdev_id);
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_sta_keepalive(struct ath12k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct wmi_sta_keepalive_arp_resp_params *arp;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_sta_keepalive_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd) + sizeof(*arp);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->enabled = cpu_to_le32(arg->enabled);
+ cmd->interval = cpu_to_le32(arg->interval);
+ cmd->method = cpu_to_le32(arg->method);
+
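+	/* The ARP response TLV always follows the keepalive TLV; its address
+	 * fields are only filled in for the ARP-based keepalive methods.
+	 */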
+ arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
+ arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ sizeof(*arp));
+ if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+ arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+ arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
+ arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+ return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}
+
+int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
+{
+ struct wmi_mlo_setup_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ u32 *partner_links, num_links;
+ int i, ret, buf_len, arg_len;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+
+ num_links = mlo_params->num_partner_links;
+ arg_len = num_links * sizeof(u32);
+ buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_setup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
+ sizeof(*cmd));
+ cmd->mld_group_id = mlo_params->group_id;
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ ptr = skb->data + sizeof(*cmd);
+
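+	/* Partner hardware link ids follow as a uint32 array TLV. */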
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
+ ptr += TLV_HDR_SIZE;
+
+ partner_links = ptr;
+ for (i = 0; i < num_links; i++)
+ partner_links[i] = mlo_params->partner_link_id[i];
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_wmi_mlo_ready(struct ath12k *ar)
+{
+ struct wmi_mlo_ready_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_ready_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_wmi_mlo_teardown(struct ath12k *ar)
+{
+ struct wmi_mlo_teardown_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+	cmd->reason_code = cpu_to_le32(WMI_MLO_TEARDOWN_SSR_REASON);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 103462feb935..45fe699ce8a5 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -24,6 +24,7 @@
struct ath12k_base;
struct ath12k;
+struct ath12k_link_vif;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@@ -164,10 +165,6 @@ struct wmi_tlv {
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
-#define WLAN_SCAN_MAX_HINT_S_SSID 10
-#define WLAN_SCAN_MAX_HINT_BSSID 10
-#define MAX_RNR_BSS 5
-
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
#define WMI_BA_MODE_BUFFER_SIZE_256 3
@@ -288,6 +285,7 @@ enum wmi_cmd_group {
WMI_GRP_TWT = 0x3e,
WMI_GRP_MOTION_DET = 0x3f,
WMI_GRP_SPATIAL_REUSE = 0x40,
+ WMI_GRP_MLO = 0x48,
};
#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
@@ -357,6 +355,9 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID = 0x4044,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID = 0x4045,
+ WMI_PDEV_SET_BIOS_INTERFACE_CMDID = 0x404A,
WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_DELETE_CMDID,
WMI_VDEV_START_REQUEST_CMDID,
@@ -665,6 +666,10 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+ WMI_MLO_LINK_SET_ACTIVE_CMDID = WMI_TLV_CMD(WMI_GRP_MLO),
+ WMI_MLO_SETUP_CMDID,
+ WMI_MLO_READY_CMDID,
+ WMI_MLO_TEARDOWN_CMDID,
};
enum wmi_tlv_event_id {
@@ -706,6 +711,8 @@ enum wmi_tlv_event_id {
WMI_PDEV_RAP_INFO_EVENTID,
WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
WMI_SERVICE_READY_EXT2_EVENTID,
+ WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID =
+ WMI_SERVICE_READY_EXT2_EVENTID + 4,
WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_STOPPED_EVENTID,
WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
@@ -747,6 +754,7 @@ enum wmi_tlv_event_id {
WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
WMI_HOST_FILS_DISCOVERY_EVENTID,
+ WMI_MGMT_RX_FW_CONSUMED_EVENTID = WMI_HOST_FILS_DISCOVERY_EVENTID + 3,
WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
WMI_TX_ADDBA_COMPLETE_EVENTID,
WMI_BA_RSP_SSN_EVENTID,
@@ -845,6 +853,8 @@ enum wmi_tlv_event_id {
WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID =
+ WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
WMI_DCC_GET_STATS_RESP_EVENTID,
@@ -874,6 +884,9 @@ enum wmi_tlv_event_id {
WMI_TWT_DEL_DIALOG_EVENTID,
WMI_TWT_PAUSE_DIALOG_EVENTID,
WMI_TWT_RESUME_DIALOG_EVENTID,
+ WMI_MLO_LINK_SET_ACTIVE_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MLO),
+ WMI_MLO_SETUP_COMPLETE_EVENTID,
+ WMI_MLO_TEARDOWN_COMPLETE_EVENTID,
};
enum wmi_tlv_pdev_param {
@@ -1929,6 +1942,22 @@ enum wmi_tlv_tag {
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
WMI_TAG_EHT_RATE_SET = 0x3C4,
+ WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
+ WMI_TAG_MLO_TX_SEND_PARAMS,
+ WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+ WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC,
+ WMI_TAG_MLO_SETUP_CMD = 0x3C9,
+ WMI_TAG_MLO_SETUP_COMPLETE_EVENT,
+ WMI_TAG_MLO_READY_CMD,
+ WMI_TAG_MLO_TEARDOWN_CMD,
+ WMI_TAG_MLO_TEARDOWN_COMPLETE,
+ WMI_TAG_MLO_PEER_ASSOC_PARAMS = 0x3D0,
+ WMI_TAG_MLO_PEER_CREATE_PARAMS = 0x3D5,
+ WMI_TAG_MLO_VDEV_START_PARAMS = 0x3D6,
+ WMI_TAG_MLO_VDEV_CREATE_PARAMS = 0x3D7,
+ WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
+ WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
+ WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
WMI_TAG_MAX
};
@@ -2152,6 +2181,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
WMI_MAX_EXT_SERVICE = 256,
@@ -2199,8 +2229,11 @@ enum wmi_peer_param {
WMI_PEER_SET_MAX_TX_RATE = 17,
WMI_PEER_SET_MIN_TX_RATE = 18,
WMI_PEER_SET_DEFAULT_ROUTING = 19,
+ WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP = 39,
};
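+/* For WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP the 20 MHz puncture bitmap
+ * occupies bits 8-23 of the param value.
+ */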
+#define WMI_PEER_PUNCTURE_BITMAP GENMASK(23, 8)
+
enum wmi_slot_time {
WMI_VDEV_SLOT_TIME_LONG = 1,
WMI_VDEV_SLOT_TIME_SHORT = 2,
@@ -2287,6 +2320,13 @@ struct ath12k_wmi_host_mem_chunk_arg {
u32 req_id;
};
+enum ath12k_peer_metadata_version {
+ ATH12K_PEER_METADATA_V0,
+ ATH12K_PEER_METADATA_V1,
+ ATH12K_PEER_METADATA_V1A,
+ ATH12K_PEER_METADATA_V1B
+};
+
struct ath12k_wmi_resource_config_arg {
u32 num_vdevs;
u32 num_peers;
@@ -2349,8 +2389,10 @@ struct ath12k_wmi_resource_config_arg {
u32 sched_params;
u32 twt_ap_pdev_count;
u32 twt_ap_sta_count;
+ enum ath12k_peer_metadata_version peer_metadata_ver;
+ u32 ema_max_vap_cnt;
+ u32 ema_max_profile_period;
bool is_reg_cc_ext_event_supported;
- u8 dp_peer_meta_data_ver;
};
struct ath12k_wmi_init_cmd_arg {
@@ -2404,6 +2446,8 @@ struct wmi_init_cmd {
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
+#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
+#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET BIT(9)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@@ -2604,6 +2648,19 @@ struct ath12k_wmi_soc_hal_reg_caps_params {
__le32 num_phy;
} __packed;
+enum wmi_channel_width {
+ WMI_CHAN_WIDTH_20 = 0,
+ WMI_CHAN_WIDTH_40 = 1,
+ WMI_CHAN_WIDTH_80 = 2,
+ WMI_CHAN_WIDTH_160 = 3,
+ WMI_CHAN_WIDTH_80P80 = 4,
+ WMI_CHAN_WIDTH_5 = 5,
+ WMI_CHAN_WIDTH_10 = 6,
+ WMI_CHAN_WIDTH_165 = 7,
+ WMI_CHAN_WIDTH_160P160 = 8,
+ WMI_CHAN_WIDTH_320 = 9,
+};
+
#define WMI_MAX_EHTCAP_MAC_SIZE 2
#define WMI_MAX_EHTCAP_PHY_SIZE 3
#define WMI_MAX_EHTCAP_RATE_SET 3
@@ -2659,6 +2716,8 @@ struct ath12k_wmi_caps_ext_params {
__le32 eht_cap_info_internal;
__le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2G_SIZE];
__le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5G_SIZE];
+ __le32 eml_capability;
+ __le32 mld_capability;
} __packed;
/* 2 word representation of MAC addr */
@@ -2707,6 +2766,9 @@ struct ath12k_wmi_vdev_create_arg {
} chains[NUM_NL80211_BANDS];
u32 pdev_id;
u8 if_stats_id;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
+ u8 mld_addr[ETH_ALEN];
};
#define ATH12K_MAX_VDEV_STATS_ID 0x30
@@ -2728,9 +2790,36 @@ struct wmi_vdev_create_cmd {
struct ath12k_wmi_vdev_txrx_streams_params {
__le32 tlv_header;
- u32 band;
- u32 supported_tx_streams;
- u32 supported_rx_streams;
+ __le32 band;
+ __le32 supported_tx_streams;
+ __le32 supported_rx_streams;
+} __packed;
+
+struct wmi_vdev_create_mlo_params {
+ __le32 tlv_header;
+ struct ath12k_wmi_mac_addr_params mld_macaddr;
+} __packed;
+
+#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
+#define ATH12K_WMI_FLAG_MLO_MCAST_VDEV BIT(5)
+#define ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT BIT(6)
+#define ATH12K_WMI_FLAG_MLO_FORCED_INACTIVE BIT(7)
+#define ATH12K_WMI_FLAG_MLO_LINK_ADD BIT(8)
+
+struct wmi_vdev_start_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
+} __packed;
+
+struct wmi_partner_link_info {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 hw_link_id;
+ struct ath12k_wmi_mac_addr_params vdev_addr;
} __packed;
struct wmi_vdev_delete_cmd {
@@ -2738,14 +2827,23 @@ struct wmi_vdev_delete_cmd {
__le32 vdev_id;
} __packed;
+struct ath12k_wmi_vdev_up_params {
+ u32 vdev_id;
+ u32 aid;
+ const u8 *bssid;
+ const u8 *tx_bssid;
+ u32 nontx_profile_idx;
+ u32 nontx_profile_cnt;
+};
+
struct wmi_vdev_up_cmd {
__le32 tlv_header;
__le32 vdev_id;
__le32 vdev_assoc_id;
struct ath12k_wmi_mac_addr_params vdev_bssid;
- struct ath12k_wmi_mac_addr_params trans_bssid;
- __le32 profile_idx;
- __le32 profile_num;
+ struct ath12k_wmi_mac_addr_params tx_vdev_bssid;
+ __le32 nontx_profile_idx;
+ __le32 nontx_profile_cnt;
} __packed;
struct wmi_vdev_stop_cmd {
@@ -2773,6 +2871,10 @@ struct ath12k_wmi_ssid_params {
enum wmi_vdev_mbssid_flags {
WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0),
+ WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP = BIT(1),
+ WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP = BIT(2),
+ WMI_VDEV_MBSSID_FLAGS_EMA_MODE = BIT(3),
+ WMI_VDEV_MBSSID_FLAGS_SCAN_MODE_VAP = BIT(4),
};
struct wmi_vdev_start_request_cmd {
@@ -2863,6 +2965,27 @@ enum wmi_phy_mode {
MODE_MAX = 33,
};
+#define ATH12K_WMI_MLO_MAX_LINKS 4
+
+struct wmi_ml_partner_info {
+ u32 vdev_id;
+ u32 hw_link_id;
+ u8 addr[ETH_ALEN];
+ bool assoc_link;
+ bool primary_umac;
+ bool logical_link_idx_valid;
+ u32 logical_link_idx;
+};
+
+struct wmi_ml_arg {
+ bool enabled;
+ bool assoc_link;
+ bool mcast_link;
+ bool link_add;
+ u8 num_partner_links;
+ struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
struct wmi_vdev_start_req_arg {
u32 vdev_id;
u32 freq;
@@ -2900,12 +3023,19 @@ struct wmi_vdev_start_req_arg {
u32 mbssid_flags;
u32 mbssid_tx_vdev_id;
u32 punct_bitmap;
+ struct wmi_ml_arg ml;
};
struct ath12k_wmi_peer_create_arg {
const u8 *peer_addr;
u32 peer_type;
u32 vdev_id;
+ bool ml_enabled;
+};
+
+struct wmi_peer_create_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
};
struct ath12k_wmi_pdev_set_regdomain_arg {
@@ -3075,6 +3205,7 @@ struct wmi_pdev_bss_chan_info_req_cmd {
__le32 tlv_header;
/* ref wmi_bss_chan_info_req_type */
__le32 req_type;
+ __le32 pdev_id;
} __packed;
struct wmi_ap_ps_peer_cmd {
@@ -3357,34 +3488,6 @@ struct wmi_bssid_arg {
const u8 *bssid;
};
-struct wmi_start_scan_arg {
- u32 scan_id;
- u32 scan_req_id;
- u32 vdev_id;
- u32 scan_priority;
- u32 notify_scan_events;
- u32 dwell_time_active;
- u32 dwell_time_passive;
- u32 min_rest_time;
- u32 max_rest_time;
- u32 repeat_probe_time;
- u32 probe_spacing_time;
- u32 idle_time;
- u32 max_scan_time;
- u32 probe_delay;
- u32 scan_ctrl_flags;
-
- u32 ie_len;
- u32 n_channels;
- u32 n_ssids;
- u32 n_bssids;
-
- u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
- u32 channels[64];
- struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
- struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
-};
-
#define WMI_SCAN_STOP_ONE 0x00000000
#define WMI_SCAN_STOP_VAP_ALL 0x01000000
#define WMI_SCAN_STOP_ALL 0x04000000
@@ -3523,6 +3626,16 @@ struct ath12k_wmi_p2p_noa_info {
#define WMI_BEACON_TX_BUFFER_SIZE 512
+#define WMI_EMA_BEACON_CNT GENMASK(7, 0)
+#define WMI_EMA_BEACON_IDX GENMASK(15, 8)
+#define WMI_EMA_BEACON_FIRST GENMASK(23, 16)
+#define WMI_EMA_BEACON_LAST GENMASK(31, 24)
+
+struct ath12k_wmi_bcn_tmpl_ema_arg {
+ u8 bcn_cnt;
+ u8 bcn_index;
+};
+
struct wmi_bcn_tmpl_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -3533,6 +3646,11 @@ struct wmi_bcn_tmpl_cmd {
__le32 csa_event_bitmap;
__le32 mbssid_ie_offset;
__le32 esp_ie_offset;
+ __le32 csc_switch_count_offset;
+ __le32 csc_event_bitmap;
+ __le32 mu_edca_ie_offset;
+ __le32 feature_enable_bitmap;
+ __le32 ema_params;
} __packed;
struct wmi_p2p_go_set_beacon_ie_cmd {
@@ -3584,6 +3702,24 @@ struct wmi_vdev_install_key_arg {
#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+#define ATH12K_WMI_MLO_MAX_PARTNER_LINKS \
+ (ATH12K_WMI_MLO_MAX_LINKS + ATH12K_MAX_NUM_BRIDGE_LINKS - 1)
+
+struct peer_assoc_mlo_params {
+ bool enabled;
+ bool assoc_link;
+ bool primary_umac;
+ bool peer_id_valid;
+ bool logical_link_idx_valid;
+ bool bridge_peer;
+ u8 mld_addr[ETH_ALEN];
+ u32 logical_link_idx;
+ u32 ml_peer_id;
+ u32 ieee_link_id;
+ u8 num_partner_links;
+ struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
struct wmi_rate_set_arg {
u32 num_rates;
u8 rates[WMI_MAX_SUPPORTED_RATES];
@@ -3658,8 +3794,36 @@ struct ath12k_wmi_peer_assoc_arg {
u32 peer_eht_tx_mcs_set[WMI_MAX_EHTCAP_RATE_SET];
struct ath12k_wmi_ppe_threshold_arg peer_eht_ppet;
u32 punct_bitmap;
+ bool is_assoc;
+ struct peer_assoc_mlo_params ml;
};
+#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LINK_ID_VALID BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
+
+struct wmi_peer_assoc_mlo_partner_info_params {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 hw_link_id;
+ __le32 flags;
+ __le32 logical_link_idx;
+} __packed;
+
+struct wmi_peer_assoc_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
+ struct ath12k_wmi_mac_addr_params mld_addr;
+ __le32 logical_link_idx;
+ __le32 ml_peer_id;
+ __le32 ieee_link_id;
+ __le32 emlsr_trans_timeout_us;
+ __le32 emlsr_trans_delay_us;
+ __le32 emlsr_padding_delay_us;
+} __packed;
+
struct wmi_peer_assoc_complete_cmd {
__le32 tlv_header;
struct ath12k_wmi_mac_addr_params peer_macaddr;
@@ -3909,7 +4073,6 @@ struct ath12k_wmi_eht_rate_set_params {
#define MAX_REG_RULES 10
#define REG_ALPHA2_LEN 2
#define MAX_6G_REG_RULES 5
-#define REG_US_5G_NUM_REG_RULES 4
enum wmi_start_event_param {
WMI_VDEV_START_RESP_EVENT = 0,
@@ -4052,7 +4215,6 @@ struct wmi_vdev_stopped_event {
} __packed;
struct wmi_pdev_bss_chan_info_event {
- __le32 pdev_id;
__le32 freq; /* Units in MHz */
__le32 noise_floor; /* units are dBm */
/* rx clear - how often the channel was unused */
@@ -4070,6 +4232,7 @@ struct wmi_pdev_bss_chan_info_event {
/*rx_cycle cnt for my bss in 64bits format */
__le32 rx_bss_cycle_count_low;
__le32 rx_bss_cycle_count_high;
+ __le32 pdev_id;
} __packed;
#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
@@ -4214,6 +4377,9 @@ struct wmi_peer_sta_kickout_event {
struct ath12k_wmi_mac_addr_params peer_macaddr;
} __packed;
+#define WMI_ROAM_REASON_MASK GENMASK(3, 0)
+#define WMI_ROAM_SUBNET_STATUS_MASK GENMASK(5, 4)
+
enum wmi_roam_reason {
WMI_ROAM_REASON_BETTER_AP = 1,
WMI_ROAM_REASON_BEACON_MISS = 2,
@@ -4776,10 +4942,9 @@ struct wmi_probe_tmpl_cmd {
__le32 buf_len;
} __packed;
-#define WMI_MAX_MEM_REQS 32
-
-#define MAX_RADIOS 3
+#define MAX_RADIOS 2
+#define WMI_MLO_CMD_TIMEOUT_HZ (5 * HZ)
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
@@ -4808,6 +4973,37 @@ struct ath12k_wmi_base {
struct ath12k_wmi_target_cap_arg *targ_cap;
};
+struct wmi_pdev_set_bios_interface_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 param_type_id;
+ __le32 length;
+} __packed;
+
+enum wmi_bios_param_type {
+ WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE = 0,
+ WMI_BIOS_PARAM_TAS_CONFIG_TYPE = 1,
+ WMI_BIOS_PARAM_TAS_DATA_TYPE = 2,
+
+ /* bandedge control power */
+ WMI_BIOS_PARAM_TYPE_BANDEDGE = 3,
+
+ WMI_BIOS_PARAM_TYPE_MAX,
+};
+
+struct wmi_pdev_set_bios_sar_table_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 sar_len;
+ __le32 dbs_backoff_len;
+} __packed;
+
+struct wmi_pdev_set_bios_geo_table_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 geo_len;
+} __packed;
+
#define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024)
enum wmi_sys_cap_info_flags {
@@ -4845,6 +5041,593 @@ struct wmi_twt_disable_event {
__le32 status;
} __packed;
+struct wmi_mlo_setup_cmd {
+ __le32 tlv_header;
+ __le32 mld_group_id;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_mlo_setup_arg {
+ __le32 group_id;
+ u8 num_partner_links;
+ u8 *partner_link_id;
+};
+
+struct wmi_mlo_ready_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+} __packed;
+
+enum wmi_mlo_tear_down_reason_code_type {
+ WMI_MLO_TEARDOWN_SSR_REASON,
+};
+
+struct wmi_mlo_teardown_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 reason_code;
+} __packed;
+
+struct wmi_mlo_setup_complete_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
+struct wmi_mlo_teardown_complete_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
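+
+/* Presumed bring-up flow for the structs above: ath12k_wmi_mlo_setup()
+ * sends wmi_mlo_setup_cmd per pdev with the MLD group id and partner
+ * link list, and firmware answers with wmi_mlo_setup_complete_event;
+ * ath12k_wmi_mlo_ready() then signals that all links are configured.
+ * Teardown mirrors this via wmi_mlo_teardown_cmd (e.g. with
+ * WMI_MLO_TEARDOWN_SSR_REASON during firmware recovery).
+ */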
+
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
+enum wmi_wow_interface_cfg {
+ WOW_IFACE_PAUSE_ENABLED,
+ WOW_IFACE_PAUSE_DISABLED
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_PAGE_FAULT = 0x3a,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_PAGE_FAULT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
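+/* WOW_HDR_LEN is the native-wifi header: a 24-byte 802.11 3-addr header
+ * plus an 8-byte RFC 1042 header. WOW_MAX_REDUCE works out to
+ * (24 + 8) - 14 - 4 = 14 bytes; pattern length and packet offset limits
+ * are reduced by this much in native-wifi decap mode (see
+ * ath12k_wow_init()).
+ */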
+
+struct wmi_wow_bitmap_pattern_params {
+ __le32 tlv_header;
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ __le32 pattern_offset;
+ __le32 pattern_len;
+ __le32 bitmask_len;
+ __le32 pattern_id;
+} __packed;
+
+struct wmi_wow_add_pattern_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+struct wmi_wow_del_pattern_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+struct wmi_wow_add_del_event_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 is_add;
+ __le32 event_bitmap;
+} __packed;
+
+struct wmi_wow_enable_cmd {
+ __le32 tlv_header;
+ __le32 enable;
+ __le32 pause_iface_config;
+ __le32 flags;
+} __packed;
+
+struct wmi_wow_host_wakeup_cmd {
+ __le32 tlv_header;
+ __le32 reserved;
+} __packed;
+
+struct wmi_wow_ev_param {
+ __le32 vdev_id;
+ __le32 flag;
+ __le32 wake_reason;
+ __le32 data_len;
+} __packed;
+
+struct wmi_wow_ev_pg_fault_param {
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmi_wow_ev_arg {
+ enum wmi_wow_wake_reason wake_reason;
+};
+
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* size based on dot11 declaration without extra IEs as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24GHZ_DEFAULT_CH 1
+#define WMI_PNO_5GHZ_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+};
+
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP BIT(0)
+#define WMI_NLO_CONFIG_START BIT(1)
+#define WMI_NLO_CONFIG_RESET BIT(2)
+#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4)
+#define WMI_NLO_CONFIG_FAST_SCAN BIT(5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time
+ */
+#define WMI_NLO_CONFIG_ENLO BIT(7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8)
+#define WMI_NLO_CONFIG_ENLO_RESET BIT(9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13)
+
+struct wmi_nlo_ssid_params {
+ __le32 valid;
+ struct ath12k_wmi_ssid_params ssid;
+} __packed;
+
+struct wmi_nlo_enc_params {
+ __le32 valid;
+ __le32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_params {
+ __le32 valid;
+ __le32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_params {
+ __le32 valid;
+ __le32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_params {
+ __le32 valid;
+ __le32 rssi;
+} __packed;
+
+struct nlo_configured_params {
+ /* TLV tag and len */
+ __le32 tlv_header;
+ struct wmi_nlo_ssid_params ssid;
+ struct wmi_nlo_enc_params enc_type;
+ struct wmi_nlo_auth_params auth_type;
+ struct wmi_nlo_rssi_params rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_params bcast_nw_type;
+} __packed;
+
+struct wmi_network_type_arg {
+ struct cfg80211_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+};
+
+struct wmi_pno_scan_req_arg {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type_arg a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+};
+
+struct wmi_wow_nlo_config_cmd {
+ __le32 tlv_header;
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 fast_scan_max_cycles;
+ __le32 active_dwell_time;
+ __le32 passive_dwell_time;
+ __le32 probe_bundle_size;
+
+ /* ART = IRT */
+ __le32 rest_time;
+
+ /* max value that can be reached after scan_backoff_multiplier */
+ __le32 max_rest_time;
+
+ __le32 scan_backoff_multiplier;
+ __le32 fast_scan_period;
+
+ /* specific to windows */
+ __le32 slow_scan_period;
+
+ __le32 no_of_ssids;
+
+ __le32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ __le32 delay_start_time;
+
+ /* MAC Address to use in Probe Req as SA */
+ struct ath12k_wmi_mac_addr_params mac_addr;
+
+ /* Mask on which MAC has to be randomized */
+ struct ath12k_wmi_mac_addr_params mac_mask;
+
+ /* IE bitmap to use in Probe Req */
+ __le32 ie_bitmap[8];
+
+ /* Number of vendor OUIs. In the TLV vendor_oui[] */
+ __le32 num_vendor_oui;
+
+ /* Number of connected NLO band preferences */
+ __le32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_params nlo_list[];
+ * u32 channel_list[num_of_channels];
+ */
+} __packed;
+
+/* Definition of HW data filtering */
+enum hw_data_filter_type {
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0),
+ WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1),
+};
+
+struct wmi_hw_data_filter_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 enable;
+ __le32 hw_filter_bitmap;
+} __packed;
+
+struct wmi_hw_data_filter_arg {
+ u32 vdev_id;
+ bool enable;
+ u32 hw_filter_bitmap;
+};
+
+#define WMI_IPV6_UC_TYPE 0
+#define WMI_IPV6_AC_TYPE 1
+
+#define WMI_IPV6_MAX_COUNT 16
+#define WMI_IPV4_MAX_COUNT 2
+
+struct wmi_arp_ns_offload_arg {
+ u8 ipv4_addr[WMI_IPV4_MAX_COUNT][4];
+ u32 ipv4_count;
+ u32 ipv6_count;
+ u8 ipv6_addr[WMI_IPV6_MAX_COUNT][16];
+ u8 self_ipv6_addr[WMI_IPV6_MAX_COUNT][16];
+ u8 ipv6_type[WMI_IPV6_MAX_COUNT];
+ bool ipv6_valid[WMI_IPV6_MAX_COUNT];
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define WMI_MAX_NS_OFFLOADS 2
+#define WMI_MAX_ARP_OFFLOADS 2
+
+#define WMI_ARPOL_FLAGS_VALID BIT(0)
+#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2)
+
+struct wmi_arp_offload_params {
+ __le32 tlv_header;
+ __le32 flags;
+ u8 target_ipaddr[4];
+ u8 remote_ipaddr[4];
+ struct ath12k_wmi_mac_addr_params target_mac;
+} __packed;
+
+#define WMI_NSOL_FLAGS_VALID BIT(0)
+#define WMI_NSOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2)
+#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3)
+
+#define WMI_NSOL_MAX_TARGET_IPS 2
+
+struct wmi_ns_offload_params {
+ __le32 tlv_header;
+ __le32 flags;
+ u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16];
+ u8 solicitation_ipaddr[16];
+ u8 remote_ipaddr[16];
+ struct ath12k_wmi_mac_addr_params target_mac;
+} __packed;
+
+struct wmi_set_arp_ns_offload_cmd {
+ __le32 tlv_header;
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 num_ns_ext_tuples;
+ /* The TLVs follow:
+ * wmi_ns_offload_params ns[WMI_MAX_NS_OFFLOADS];
+ * wmi_arp_offload_params arp[WMI_MAX_ARP_OFFLOADS];
+ * wmi_ns_offload_params ns_ext[num_ns_ext_tuples];
+ */
+} __packed;
+
+#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000
+#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000
+#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000
+#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000
+
+#define GTK_OFFLOAD_KEK_BYTES 16
+#define GTK_OFFLOAD_KCK_BYTES 16
+#define GTK_REPLAY_COUNTER_BYTES 8
+#define WMI_MAX_KEY_LEN 32
+#define IGTK_PN_SIZE 6
+
+struct wmi_gtk_offload_status_event {
+ __le32 vdev_id;
+ __le32 flags;
+ __le32 refresh_cnt;
+ __le64 replay_ctr;
+ u8 igtk_key_index;
+ u8 igtk_key_length;
+ u8 igtk_key_rsc[IGTK_PN_SIZE];
+ u8 igtk_key[WMI_MAX_KEY_LEN];
+ u8 gtk_key_index;
+ u8 gtk_key_length;
+ u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES];
+ u8 gtk_key[WMI_MAX_KEY_LEN];
+} __packed;
+
+struct wmi_gtk_rekey_offload_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 flags;
+ u8 kek[GTK_OFFLOAD_KEK_BYTES];
+ u8 kck[GTK_OFFLOAD_KCK_BYTES];
+ u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES];
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 enabled;
+
+ /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 method;
+
+ /* in seconds */
+ __le32 interval;
+
+ /* following this structure is the TLV for struct
+ * wmi_sta_keepalive_arp_resp_params
+ */
+} __packed;
+
+struct wmi_sta_keepalive_arp_resp_params {
+ __le32 tlv_header;
+ __le32 src_ip4_addr;
+ __le32 dest_ip4_addr;
+ struct ath12k_wmi_mac_addr_params dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2,
+ WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3,
+ WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4,
+ WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
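+/* Minimal usage sketch (illustrative; 'ar' and 'arvif' assumed from the
+ * caller's context), mirroring what ath12k_wow_set_keepalive() does via
+ * ath12k_mac_vif_set_keepalive():
+ *
+ * struct wmi_sta_keepalive_arg arg = {
+ * .vdev_id = arvif->vdev_id,
+ * .enabled = 1,
+ * .method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ * .interval = WMI_STA_KEEPALIVE_INTERVAL_DEFAULT,
+ * };
+ * ath12k_wmi_sta_keepalive(ar, &arg);
+ */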
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -4858,10 +5641,10 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
const u8 *p2p_ie);
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
- struct sk_buff *bcn);
+ struct sk_buff *bcn,
+ struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args);
int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id);
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid,
- const u8 *bssid);
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params);
int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id);
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart);
@@ -4966,6 +5749,10 @@ int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
struct sk_buff *tmpl);
int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
enum wmi_host_hw_mode_config_type mode);
+int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
+ const u8 *buf, size_t buf_len);
+int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table);
+int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table);
static inline u32
ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
@@ -4993,4 +5780,31 @@ ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *p
WMI_CAPS_PARAMS_HW_LINK_ID);
}
+int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar);
+int ath12k_wmi_wow_enable(struct ath12k *ar);
+int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id);
+int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset);
+int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno_scan);
+int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar,
+ struct wmi_hw_data_filter_arg *arg);
+int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload,
+ bool enable);
+int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
+ struct ath12k_link_vif *arvif, bool enable);
+int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
+ struct ath12k_link_vif *arvif);
+int ath12k_wmi_sta_keepalive(struct ath12k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params);
+int ath12k_wmi_mlo_ready(struct ath12k *ar);
+int ath12k_wmi_mlo_teardown(struct ath12k *ar);
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c
new file mode 100644
index 000000000000..9e1c0bfd212f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wow.c
@@ -0,0 +1,1027 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <net/if_inet6.h>
+#include <net/ipv6.h>
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wow.h"
+
+static const struct wiphy_wowlan_support ath12k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *ahvif)
+{
+ return (ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE ||
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT ||
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO);
+}
+
+int ath12k_wow_enable(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ int i, ret;
+
+ clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+
+ /* The firmware might be busy and cannot enter WoW immediately.
+ * In that case the firmware notifies the host with an
+ * ATH12K_HTC_MSG_NACK_SUSPEND message, asking the host to try
+ * again later. Per the firmware team, up to 10 retries may be
+ * needed.
+ */
+ for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) {
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath12k_wmi_wow_enable(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ab,
+ "timed out while waiting for htc suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
+ /* success, suspend complete received */
+ return 0;
+
+ ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
+ i);
+ msleep(ATH12K_WOW_RETRY_WAIT_MS);
+ }
+
+ ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
+
+ return -ETIMEDOUT;
+}
+
+int ath12k_wow_wakeup(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ ret = ath12k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_vif_cleanup(struct ath12k_link_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_cleanup(struct ath12k *ar)
+{
+ struct ath12k_link_vif *arvif;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath12k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Convert a 802.3 format to a 802.11 format.
+ *         +------------+-----------+--------+----------------+
+ * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
+ *         +------------+-----------+--------+----------------+
+ *                |__         |_______    |____________  |________
+ *                   |                |                |          |
+ *         +--+------------+----+-----------+---------------+-----------+
+ * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
+ *         +--+------------+----+-----------+---------------+-----------+
+ */
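+/* Worked example (hypothetical pattern, not from the driver): matching
+ * the type field (eth_pkt_ofs = 12 = prot_ofs) takes the last branch
+ * below, so the converted offset becomes 12 - 12 + 24 + 6 = 30: past
+ * the 24-byte 802.11 header and the 6 LLC/SNAP bytes that precede
+ * eth_type in the RFC 1042 header.
+ */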
+static void
+ath12k_wow_convert_8023_to_80211(struct ath12k *ar,
+ const struct cfg80211_pkt_pattern *eth_pattern,
+ struct ath12k_pkt_pattern *i80211_pattern)
+{
+ size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type);
+ size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1);
+ size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3);
+ size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr);
+ size_t prot_ofs = offsetof(struct ethhdr, h_proto);
+ size_t src_ofs = offsetof(struct ethhdr, h_source);
+ u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {};
+ const u8 *eth_pat = eth_pattern->pattern;
+ size_t eth_pat_len = eth_pattern->pattern_len;
+ size_t eth_pkt_ofs = eth_pattern->pkt_offset;
+ u8 *bytemask = i80211_pattern->bytemask;
+ u8 *pat = i80211_pattern->pattern;
+ size_t pat_len = 0;
+ size_t pkt_ofs = 0;
+ size_t delta;
+ int i;
+
+ /* convert bitmask to bytemask */
+ for (i = 0; i < eth_pat_len; i++)
+ if (eth_pattern->mask[i / 8] & BIT(i % 8))
+ eth_bytemask[i] = 0xff;
+
+ if (eth_pkt_ofs < ETH_ALEN) {
+ pkt_ofs = eth_pkt_ofs + a1_ofs;
+
+ if (size_add(eth_pkt_ofs, eth_pat_len) < ETH_ALEN) {
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ } else if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
+ memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);
+
+ delta = eth_pkt_ofs + eth_pat_len - src_ofs;
+ memcpy(pat + a3_ofs - pkt_ofs,
+ eth_pat + ETH_ALEN - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + a3_ofs - pkt_ofs,
+ eth_bytemask + ETH_ALEN - eth_pkt_ofs,
+ delta);
+
+ pat_len = a3_ofs - pkt_ofs + delta;
+ } else {
+ memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);
+
+ memcpy(pat + a3_ofs - pkt_ofs,
+ eth_pat + ETH_ALEN - eth_pkt_ofs,
+ ETH_ALEN);
+ memcpy(bytemask + a3_ofs - pkt_ofs,
+ eth_bytemask + ETH_ALEN - eth_pkt_ofs,
+ ETH_ALEN);
+
+ delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
+ memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_pat + prot_ofs - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_bytemask + prot_ofs - eth_pkt_ofs,
+ delta);
+
+ pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
+ }
+ } else if (eth_pkt_ofs < prot_ofs) {
+ pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs;
+
+ if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ } else {
+ memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs);
+
+ delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
+ memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_pat + prot_ofs - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_bytemask + prot_ofs - eth_pkt_ofs,
+ delta);
+
+ pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
+ }
+ } else {
+ pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs;
+
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ }
+
+ i80211_pattern->pattern_len = pat_len;
+ i80211_pattern->pkt_offset = pkt_ofs;
+}
+
+static int
+ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id,
+ const struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req_arg *pno)
+{
+ int i, j;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Filling per profile params */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = ssid_len;
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copying list of valid channel into request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ for (j = 0; j < pno->uc_networks_count; j++) {
+ if (pno->a_networks[j].ssid.ssid_len ==
+ nd_config->ssids[i].ssid_len &&
+ !memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ pno->a_networks[j].ssid.ssid_len)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ }
+ }
+
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath12k_warn(ar->ab, "Invalid number of PNO scan plans: %d",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+
+ return 0;
+}
+
+static int ath12k_wow_vif_set_wakeups(struct ath12k_link_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ struct ath12k *ar = arvif->ar;
+ unsigned long wow_mask = 0;
+ int pattern_id = 0;
+ int ret, i, j;
+
+ /* Setup requested WOW features */
+ switch (arvif->ahvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ fallthrough;
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req_arg *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ar->nlo_enabled = true;
+
+ ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i];
+ struct ath12k_pkt_pattern new_pattern = {};
+
+ if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
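+/* C2S ("case to string"): expands to a case label that returns the
+ * enumerator's name; used by wow_wakeup_event() and wow_reason() below
+ * and #undef'd after them.
+ */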
+
+ if (ar->ab->wow.wmi_conf_rx_decap_mode ==
+ ATH12K_HW_TXRX_NATIVE_WIFI) {
+ ath12k_wow_convert_8023_to_80211(ar, eth_pattern,
+ &new_pattern);
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+ } else {
+ memcpy(new_pattern.pattern, eth_pattern->pattern,
+ eth_pattern->pattern_len);
+
+ /* convert bitmask to bytemask */
+ for (j = 0; j < eth_pattern->pattern_len; j++)
+ if (eth_pattern->mask[j / 8] & BIT(j % 8))
+ new_pattern.bytemask[j] = 0xff;
+
+ new_pattern.pattern_len = eth_pattern->pattern_len;
+ new_pattern.pkt_offset = eth_pattern->pkt_offset;
+ }
+
+ ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ new_pattern.pattern,
+ new_pattern.bytemask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_wakeups(struct ath12k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath12k_link_vif *arvif;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
+ continue;
+ ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id)
+{
+ struct wmi_pno_scan_req_arg *pno;
+ int ret;
+
+ if (!ar->nlo_enabled)
+ return 0;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ret = ath12k_wmi_wow_config_pno(ar, vdev_id, pno);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to disable PNO: %d", ret);
+ goto out;
+ }
+
+ ar->nlo_enabled = false;
+
+out:
+ kfree(pno);
+ return ret;
+}
+
+static int ath12k_wow_vif_clean_nlo(struct ath12k_link_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+
+ switch (arvif->ahvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id);
+ default:
+ return 0;
+ }
+}
+
+static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
+{
+ struct ath12k_link_vif *arvif;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
+ continue;
+
+ ret = ath12k_wow_vif_clean_nlo(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_hw_filter(struct ath12k *ar)
+{
+ struct wmi_hw_data_filter_arg arg;
+ struct ath12k_link_vif *arvif;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enable = true;
+ arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC;
+ ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_clear_hw_filter(struct ath12k *ar)
+{
+ struct wmi_hw_data_filter_arg arg;
+ struct ath12k_link_vif *arvif;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enable = false;
+ arg.hw_filter_bitmap = 0;
+ ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
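+/* Build the IPv6 solicited-node multicast address (ff02::1:ffXX:XXXX,
+ * see RFC 4291) for each offloaded address, so the firmware can answer
+ * neighbour solicitations sent to these groups while the host sleeps.
+ */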
+static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ int i;
+
+ for (i = 0; i < offload->ipv6_count; i++) {
+ offload->self_ipv6_addr[i][0] = 0xff;
+ offload->self_ipv6_addr[i][1] = 0x02;
+ offload->self_ipv6_addr[i][11] = 0x01;
+ offload->self_ipv6_addr[i][12] = 0xff;
+ offload->self_ipv6_addr[i][13] =
+ offload->ipv6_addr[i][13];
+ offload->self_ipv6_addr[i][14] =
+ offload->ipv6_addr[i][14];
+ offload->self_ipv6_addr[i][15] =
+ offload->ipv6_addr[i][15];
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n",
+ offload->self_ipv6_addr[i]);
+ }
+}
+
+static void ath12k_wow_prepare_ns_offload(struct ath12k_link_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ struct net_device *ndev = ieee80211_vif_to_wdev(arvif->ahvif->vif)->netdev;
+ struct ath12k_base *ab = arvif->ar->ab;
+ struct inet6_ifaddr *ifa6;
+ struct ifacaddr6 *ifaca6;
+ struct inet6_dev *idev;
+ u32 count = 0, scope;
+
+ if (!ndev)
+ return;
+
+ idev = in6_dev_get(ndev);
+ if (!idev)
+ return;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n");
+
+ read_lock_bh(&idev->lock);
+
+ /* get unicast address */
+ list_for_each_entry(ifa6, &idev->addr_list, if_list) {
+ if (count >= WMI_IPV6_MAX_COUNT)
+ goto unlock;
+
+ if (ifa6->flags & IFA_F_DADFAILED)
+ continue;
+
+ scope = ipv6_addr_src_scope(&ifa6->addr);
+ if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
+ scope != IPV6_ADDR_SCOPE_GLOBAL) {
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "Unsupported ipv6 scope: %d\n", scope);
+ continue;
+ }
+
+ memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
+ sizeof(ifa6->addr.s6_addr));
+ offload->ipv6_type[count] = WMI_IPV6_UC_TYPE;
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 uc %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ }
+
+ /* get anycast address */
+ rcu_read_lock();
+
+ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+ ifaca6 = rcu_dereference(ifaca6->aca_next)) {
+ if (count >= WMI_IPV6_MAX_COUNT) {
+ rcu_read_unlock();
+ goto unlock;
+ }
+
+ scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
+ if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
+ scope != IPV6_ADDR_SCOPE_GLOBAL) {
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "Unsupported ipv scope: %d\n", scope);
+ continue;
+ }
+
+ memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
+ sizeof(ifaca6->aca_addr));
+ offload->ipv6_type[count] = WMI_IPV6_AC_TYPE;
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ }
+
+ rcu_read_unlock();
+
+unlock:
+ read_unlock_bh(&idev->lock);
+
+ in6_dev_put(idev);
+
+ offload->ipv6_count = count;
+ ath12k_wow_generate_ns_mc_addr(ab, offload);
+}
+
+static void ath12k_wow_prepare_arp_offload(struct ath12k_link_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct ieee80211_vif_cfg vif_cfg = vif->cfg;
+ struct ath12k_base *ab = arvif->ar->ab;
+ u32 ipv4_cnt;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n");
+
+ ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT);
+ memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32));
+ offload->ipv4_count = ipv4_cnt;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
+ vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr);
+}
+
+static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
+{
+ struct wmi_arp_ns_offload_arg *offload;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_vif *ahvif;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ offload = kmalloc(sizeof(*offload), GFP_KERNEL);
+ if (!offload)
+ return -ENOMEM;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ahvif = arvif->ahvif;
+
+ if (ahvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ memset(offload, 0, sizeof(*offload));
+
+ memcpy(offload->mac_addr, ahvif->vif->addr, ETH_ALEN);
+ ath12k_wow_prepare_ns_offload(arvif, offload);
+ ath12k_wow_prepare_arp_offload(arvif, offload);
+
+ ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ kfree(offload);
+ return ret;
+ }
+ }
+
+ kfree(offload);
+
+ return 0;
+}
+
+static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
+{
+ struct ath12k_link_vif *arvif;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA ||
+ !arvif->is_up ||
+ !arvif->rekey_data.enable_offload)
+ continue;
+
+ /* get rekey info before disable rekey offload */
+ if (!enable) {
+ ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable)
+{
+ int ret;
+
+ ret = ath12k_wow_arp_ns_offload(ar, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ ret = ath12k_gtk_rekey_offload(ar, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_keepalive(struct ath12k *ar,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath12k_link_vif *arvif;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath12k_mac_vif_set_keepalive(arvif, method, interval);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ret = ath12k_wow_cleanup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_protocol_offload(ar, true);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_set_hw_filter(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set hw filter: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_enable(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ath12k_hif_irq_disable(ar->ab);
+ ath12k_hif_ce_irq_disable(ar->ab);
+
+ ret = ath12k_hif_suspend(ar->ab);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath12k_wow_wakeup(ar);
+
+cleanup:
+ ath12k_wow_cleanup(ar);
+
+exit:
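+ /* As in other ath drivers, returning 1 (rather than a negative
+ * errno) tells mac80211 that WoW could not be entered and that a
+ * full reconfiguration is needed on resume.
+ */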
+ return ret ? 1 : 0;
+}
+
+void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ device_set_wakeup_enable(ar->ab->dev, enabled);
+}
+
+int ath12k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ret = ath12k_hif_resume(ar->ab);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ath12k_hif_ce_irq_enable(ar->ab);
+ ath12k_hif_irq_enable(ar->ab);
+
+ ret = ath12k_wow_wakeup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_nlo_cleanup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_clear_hw_filter(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_protocol_offload(ar, false);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ switch (ah->state) {
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH12K_HW_STATE_OFF:
+ case ATH12K_HW_STATE_RESTARTING:
+ case ATH12K_HW_STATE_RESTARTED:
+ case ATH12K_HW_STATE_WEDGED:
+ ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
+ ah->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int ath12k_wow_init(struct ath12k *ar)
+{
+ if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
+ return 0;
+
+ ar->wow.wowlan_support = ath12k_wowlan_support;
+
+ if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
+ ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ device_set_wakeup_capable(ar->ab->dev, true);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/wow.h b/drivers/net/wireless/ath/ath12k/wow.h
new file mode 100644
index 000000000000..af9be5fadcc3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wow.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_WOW_H
+#define ATH12K_WOW_H
+
+#define ATH12K_WOW_RETRY_NUM 10
+#define ATH12K_WOW_RETRY_WAIT_MS 200
+#define ATH12K_WOW_PATTERNS 22
+
+struct ath12k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+struct ath12k_pkt_pattern {
+ u8 pattern[WOW_MAX_PATTERN_SIZE];
+ u8 bytemask[WOW_MAX_PATTERN_SIZE];
+ int pattern_len;
+ int pkt_offset;
+};
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 eth_type;
+} __packed;
+
+#ifdef CONFIG_PM
+
+int ath12k_wow_init(struct ath12k *ar);
+int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath12k_wow_op_resume(struct ieee80211_hw *hw);
+void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+int ath12k_wow_enable(struct ath12k *ar);
+int ath12k_wow_wakeup(struct ath12k *ar);
+
+#else
+
+static inline int ath12k_wow_init(struct ath12k *ar)
+{
+ return 0;
+}
+
+static inline int ath12k_wow_enable(struct ath12k *ar)
+{
+ return 0;
+}
+
+static inline int ath12k_wow_wakeup(struct ath12k *ar)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
+#endif /* ATH12K_WOW_H */