Diffstat (limited to 'drivers/net/wireless/ath/ath10k')
64 files changed, 71910 insertions, 8507 deletions
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index cde58fe96254..876aed765833 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -1,39 +1,98 @@ +# SPDX-License-Identifier: ISC config ATH10K - tristate "Atheros 802.11ac wireless cards support" - depends on MAC80211 + tristate "Atheros 802.11ac wireless cards support" + depends on MAC80211 && HAS_DMA select ATH_COMMON - ---help--- - This module adds support for wireless adapters based on - Atheros IEEE 802.11ac family of chipsets. + select CRC32 + select WANT_DEV_COREDUMP + select ATH10K_CE + help + This module adds support for wireless adapters based on + Atheros IEEE 802.11ac family of chipsets. - If you choose to build a module, it'll be called ath10k. + If you choose to build a module, it'll be called ath10k. + +config ATH10K_CE + bool config ATH10K_PCI tristate "Atheros ath10k PCI support" depends on ATH10K && PCI - ---help--- + help This module adds support for the PCIe bus. +config ATH10K_AHB + bool "Atheros ath10k AHB support" + depends on ATH10K_PCI && OF && RESET_CONTROLLER + help + This module adds support for the AHB bus. + +config ATH10K_SDIO + tristate "Atheros ath10k SDIO support" + depends on ATH10K && MMC + help + This module adds support for the SDIO/MMC bus. + +config ATH10K_USB + tristate "Atheros ath10k USB support (EXPERIMENTAL)" + depends on ATH10K && USB + help + This module adds experimental support for the USB bus. It is + currently a work in progress and does not fully work yet. + +config ATH10K_SNOC + tristate "Qualcomm ath10k SNOC support" + depends on ATH10K + depends on ARCH_QCOM || COMPILE_TEST + depends on QCOM_SMEM + depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n + select QCOM_SCM + select QCOM_QMI_HELPERS + help + This module adds support for the integrated WCN3990 chip + connected to the system NOC (SNOC). + config ATH10K_DEBUG bool "Atheros ath10k debugging" depends on ATH10K - ---help--- + help Enables debug support. If unsure, say Y to make it easier to debug problems. config ATH10K_DEBUGFS bool "Atheros ath10k debugfs support" - depends on ATH10K - ---help--- + depends on ATH10K && DEBUG_FS + help Enables debugfs support. If unsure, say Y to make it easier to debug problems. +config ATH10K_LEDS + bool + depends on ATH10K + depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211 + default y + +config ATH10K_SPECTRAL + bool "Atheros ath10k spectral scan support" + depends on ATH10K_DEBUGFS + select RELAY + default n + help + Say Y to enable access to the FFT/spectral data via debugfs. + config ATH10K_TRACING bool "Atheros ath10k tracing support" depends on ATH10K depends on EVENT_TRACING - ---help--- + help Select this to let ath10k use the tracing infrastructure. +config ATH10K_DFS_CERTIFIED + bool "Atheros DFS support for certified platforms" + depends on ATH10K && CFG80211_CERTIFICATION_ONUS + default n + help + This option enables DFS support for initiating radiation on + ath10k.
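Worth noting about the Kconfig structure above: ATH10K_AHB is a bool that depends on ATH10K_PCI, so (per the Makefile hunk below) ahb.o is linked into ath10k_pci.ko rather than built as its own module, and ATH10K_CE is a hidden bool selected by ATH10K so that ce.o follows whichever module needs it. A consequence is that the AHB platform driver must be registered from the PCI module's init path; ath10k_ahb_init()/ath10k_ahb_exit() compile to no-op stubs when CONFIG_ATH10K_AHB=n (see ahb.h below), so the call sites need no #ifdef. A minimal sketch of that wiring, under the assumption that pci.c (not shown in this excerpt) hosts the module entry points:

/* Sketch only: this diff shows ath10k_ahb_init()/_exit() and their
 * stubs, but not the pci.c call sites; the shape of ath10k_pci_init()
 * and the ath10k_pci_driver symbol are assumptions for illustration.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include "ahb.h"

extern struct pci_driver ath10k_pci_driver;	/* defined in pci.c (assumed) */

static int __init ath10k_pci_init(void)
{
	int ret;

	/* Register the PCI driver first... */
	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		pr_err("failed to register ath10k pci driver: %d\n", ret);

	/* ...then the AHB platform driver (a no-op stub if disabled). */
	ret = ath10k_ahb_init();
	if (ret)
		pr_err("failed to register ath10k ahb driver: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
	ath10k_ahb_exit();
}
module_exit(ath10k_pci_exit);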
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index a4179f49ee1f..02bf9b629038 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: ISC obj-$(CONFIG_ATH10K) += ath10k_core.o ath10k_core-y += mac.o \ debug.o \ @@ -8,13 +9,37 @@ ath10k_core-y += mac.o \ htt_tx.o \ txrx.o \ wmi.o \ - bmi.o + wmi-tlv.o \ + bmi.o \ + hw.o \ + p2p.o \ + swap.o +ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o +ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o +ath10k_core-$(CONFIG_THERMAL) += thermal.o +ath10k_core-$(CONFIG_ATH10K_LEDS) += leds.o +ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o +ath10k_core-$(CONFIG_PM) += wow.o +ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o +ath10k_core-$(CONFIG_ATH10K_CE) += ce.o obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o -ath10k_pci-y += pci.o \ - ce.o +ath10k_pci-y += pci.o + +ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o + +obj-$(CONFIG_ATH10K_SDIO) += ath10k_sdio.o +ath10k_sdio-y += sdio.o + +obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o +ath10k_usb-y += usb.o + +obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o +ath10k_snoc-y += qmi.o \ + qmi_wlfw_v01.o \ + snoc.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c new file mode 100644 index 000000000000..eb8b35b6224d --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -0,0 +1,862 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved. + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + */ +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/reset.h> +#include "core.h" +#include "debug.h" +#include "pci.h" +#include "ahb.h" + +static const struct of_device_id ath10k_ahb_of_match[] = { + { .compatible = "qcom,ipq4019-wifi", + .data = (void *)ATH10K_HW_QCA4019 + }, + { } +}; + +MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match); + +#define QCA4019_SRAM_ADDR 0x000C0000 +#define QCA4019_SRAM_LEN 0x00040000 /* 256 kb */ + +static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar) +{ + return &ath10k_pci_priv(ar)->ahb[0]; +} + +static void ath10k_ahb_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + iowrite32(value, ar_ahb->mem + offset); +} + +static u32 ath10k_ahb_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + return ioread32(ar_ahb->mem + offset); +} + +static u32 ath10k_ahb_gcc_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + return ioread32(ar_ahb->gcc_mem + offset); +} + +static void ath10k_ahb_tcsr_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + iowrite32(value, ar_ahb->tcsr_mem + offset); +} + +static u32 ath10k_ahb_tcsr_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + return ioread32(ar_ahb->tcsr_mem + offset); +} + +static u32 ath10k_ahb_soc_read32(struct ath10k *ar, u32 addr) +{ + return ath10k_ahb_read32(ar, RTC_SOC_BASE_ADDRESS + addr); +} + +static int ath10k_ahb_get_num_banks(struct ath10k *ar) +{ + if (ar->hw_rev == ATH10K_HW_QCA4019) + return 1; + + ath10k_warn(ar, "unknown number of banks, assuming 1\n"); + 
return 1; +} + +static int ath10k_ahb_clock_init(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + + dev = &ar_ahb->pdev->dev; + + ar_ahb->cmd_clk = devm_clk_get(dev, "wifi_wcss_cmd"); + if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) { + ath10k_err(ar, "failed to get cmd clk: %ld\n", + PTR_ERR(ar_ahb->cmd_clk)); + return ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV; + } + + ar_ahb->ref_clk = devm_clk_get(dev, "wifi_wcss_ref"); + if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) { + ath10k_err(ar, "failed to get ref clk: %ld\n", + PTR_ERR(ar_ahb->ref_clk)); + return ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV; + } + + ar_ahb->rtc_clk = devm_clk_get(dev, "wifi_wcss_rtc"); + if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) { + ath10k_err(ar, "failed to get rtc clk: %ld\n", + PTR_ERR(ar_ahb->rtc_clk)); + return ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV; + } + + return 0; +} + +static void ath10k_ahb_clock_deinit(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + ar_ahb->cmd_clk = NULL; + ar_ahb->ref_clk = NULL; + ar_ahb->rtc_clk = NULL; +} + +static int ath10k_ahb_clock_enable(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + int ret; + + if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) || + IS_ERR_OR_NULL(ar_ahb->ref_clk) || + IS_ERR_OR_NULL(ar_ahb->rtc_clk)) { + ath10k_err(ar, "clock(s) is/are not initialized\n"); + ret = -EIO; + goto out; + } + + ret = clk_prepare_enable(ar_ahb->cmd_clk); + if (ret) { + ath10k_err(ar, "failed to enable cmd clk: %d\n", ret); + goto out; + } + + ret = clk_prepare_enable(ar_ahb->ref_clk); + if (ret) { + ath10k_err(ar, "failed to enable ref clk: %d\n", ret); + goto err_cmd_clk_disable; + } + + ret = clk_prepare_enable(ar_ahb->rtc_clk); + if (ret) { + ath10k_err(ar, "failed to enable rtc clk: %d\n", ret); + goto err_ref_clk_disable; + } + + return 0; + +err_ref_clk_disable: + clk_disable_unprepare(ar_ahb->ref_clk); + +err_cmd_clk_disable: + clk_disable_unprepare(ar_ahb->cmd_clk); + +out: + return ret; +} + +static void ath10k_ahb_clock_disable(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + clk_disable_unprepare(ar_ahb->cmd_clk); + + clk_disable_unprepare(ar_ahb->ref_clk); + + clk_disable_unprepare(ar_ahb->rtc_clk); +} + +static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + + dev = &ar_ahb->pdev->dev; + + ar_ahb->core_cold_rst = devm_reset_control_get_exclusive(dev, + "wifi_core_cold"); + if (IS_ERR(ar_ahb->core_cold_rst)) { + ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n", + PTR_ERR(ar_ahb->core_cold_rst)); + return PTR_ERR(ar_ahb->core_cold_rst); + } + + ar_ahb->radio_cold_rst = devm_reset_control_get_exclusive(dev, + "wifi_radio_cold"); + if (IS_ERR(ar_ahb->radio_cold_rst)) { + ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n", + PTR_ERR(ar_ahb->radio_cold_rst)); + return PTR_ERR(ar_ahb->radio_cold_rst); + } + + ar_ahb->radio_warm_rst = devm_reset_control_get_exclusive(dev, + "wifi_radio_warm"); + if (IS_ERR(ar_ahb->radio_warm_rst)) { + ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n", + PTR_ERR(ar_ahb->radio_warm_rst)); + return PTR_ERR(ar_ahb->radio_warm_rst); + } + + ar_ahb->radio_srif_rst = devm_reset_control_get_exclusive(dev, + "wifi_radio_srif"); + if (IS_ERR(ar_ahb->radio_srif_rst)) { + ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n", + PTR_ERR(ar_ahb->radio_srif_rst)); + return PTR_ERR(ar_ahb->radio_srif_rst); + } + + 
ar_ahb->cpu_init_rst = devm_reset_control_get_exclusive(dev, + "wifi_cpu_init"); + if (IS_ERR(ar_ahb->cpu_init_rst)) { + ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n", + PTR_ERR(ar_ahb->cpu_init_rst)); + return PTR_ERR(ar_ahb->cpu_init_rst); + } + + return 0; +} + +static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + ar_ahb->core_cold_rst = NULL; + ar_ahb->radio_cold_rst = NULL; + ar_ahb->radio_warm_rst = NULL; + ar_ahb->radio_srif_rst = NULL; + ar_ahb->cpu_init_rst = NULL; +} + +static int ath10k_ahb_release_reset(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + int ret; + + if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) || + IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) { + ath10k_err(ar, "rst ctrl(s) is/are not initialized\n"); + return -EINVAL; + } + + ret = reset_control_deassert(ar_ahb->radio_cold_rst); + if (ret) { + ath10k_err(ar, "failed to deassert radio cold rst: %d\n", ret); + return ret; + } + + ret = reset_control_deassert(ar_ahb->radio_warm_rst); + if (ret) { + ath10k_err(ar, "failed to deassert radio warm rst: %d\n", ret); + return ret; + } + + ret = reset_control_deassert(ar_ahb->radio_srif_rst); + if (ret) { + ath10k_err(ar, "failed to deassert radio srif rst: %d\n", ret); + return ret; + } + + ret = reset_control_deassert(ar_ahb->cpu_init_rst); + if (ret) { + ath10k_err(ar, "failed to deassert cpu init rst: %d\n", ret); + return ret; + } + + return 0; +} + +static void ath10k_ahb_halt_axi_bus(struct ath10k *ar, u32 haltreq_reg, + u32 haltack_reg) +{ + unsigned long timeout; + u32 val; + + /* Issue halt axi bus request */ + val = ath10k_ahb_tcsr_read32(ar, haltreq_reg); + val |= AHB_AXI_BUS_HALT_REQ; + ath10k_ahb_tcsr_write32(ar, haltreq_reg, val); + + /* Wait for axi bus halted ack */ + timeout = jiffies + msecs_to_jiffies(ATH10K_AHB_AXI_BUS_HALT_TIMEOUT); + do { + val = ath10k_ahb_tcsr_read32(ar, haltack_reg); + if (val & AHB_AXI_BUS_HALT_ACK) + break; + + mdelay(1); + } while (time_before(jiffies, timeout)); + + if (!(val & AHB_AXI_BUS_HALT_ACK)) { + ath10k_err(ar, "failed to halt axi bus: %d\n", val); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_AHB, "axi bus halted\n"); +} + +static void ath10k_ahb_halt_chip(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + u32 core_id, glb_cfg_reg, haltreq_reg, haltack_reg; + u32 val; + int ret; + + if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) || + IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) { + ath10k_err(ar, "rst ctrl(s) is/are not initialized\n"); + return; + } + + core_id = ath10k_ahb_read32(ar, ATH10K_AHB_WLAN_CORE_ID_REG); + + switch (core_id) { + case 0: + glb_cfg_reg = ATH10K_AHB_TCSR_WIFI0_GLB_CFG; + haltreq_reg = ATH10K_AHB_TCSR_WCSS0_HALTREQ; + haltack_reg = ATH10K_AHB_TCSR_WCSS0_HALTACK; + break; + case 1: + glb_cfg_reg = ATH10K_AHB_TCSR_WIFI1_GLB_CFG; + haltreq_reg = ATH10K_AHB_TCSR_WCSS1_HALTREQ; + haltack_reg = ATH10K_AHB_TCSR_WCSS1_HALTACK; + break; + default: + ath10k_err(ar, "invalid core id %d found, skipping reset sequence\n", + core_id); + return; + } + + ath10k_ahb_halt_axi_bus(ar, haltreq_reg, haltack_reg); + + val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg); + val |= TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK; + ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val); + + ret = 
reset_control_assert(ar_ahb->core_cold_rst); + if (ret) + ath10k_err(ar, "failed to assert core cold rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->radio_cold_rst); + if (ret) + ath10k_err(ar, "failed to assert radio cold rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->radio_warm_rst); + if (ret) + ath10k_err(ar, "failed to assert radio warm rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->radio_srif_rst); + if (ret) + ath10k_err(ar, "failed to assert radio srif rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->cpu_init_rst); + if (ret) + ath10k_err(ar, "failed to assert cpu init rst: %d\n", ret); + msleep(10); + + /* Clear halt req and core clock disable req before + * deasserting wifi core reset. + */ + val = ath10k_ahb_tcsr_read32(ar, haltreq_reg); + val &= ~AHB_AXI_BUS_HALT_REQ; + ath10k_ahb_tcsr_write32(ar, haltreq_reg, val); + + val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg); + val &= ~TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK; + ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val); + + ret = reset_control_deassert(ar_ahb->core_cold_rst); + if (ret) + ath10k_err(ar, "failed to deassert core cold rst: %d\n", ret); + + ath10k_dbg(ar, ATH10K_DBG_AHB, "core %d reset done\n", core_id); +} + +static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg) +{ + struct ath10k *ar = arg; + + if (!ath10k_pci_irq_pending(ar)) + return IRQ_NONE; + + ath10k_pci_disable_and_clear_intx_irq(ar); + ath10k_pci_irq_msi_fw_mask(ar); + napi_schedule(&ar->napi); + + return IRQ_HANDLED; +} + +static int ath10k_ahb_request_irq_intx(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + int ret; + + ret = request_irq(ar_ahb->irq, + ath10k_ahb_interrupt_handler, + IRQF_SHARED, "ath10k_ahb", ar); + if (ret) { + ath10k_warn(ar, "failed to request legacy irq %d: %d\n", + ar_ahb->irq, ret); + return ret; + } + ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX; + + return 0; +} + +static void ath10k_ahb_release_irq_intx(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + free_irq(ar_ahb->irq, ar); +} + +static void ath10k_ahb_irq_disable(struct ath10k *ar) +{ + ath10k_ce_disable_interrupts(ar); + ath10k_pci_disable_and_clear_intx_irq(ar); +} + +static int ath10k_ahb_resource_init(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct platform_device *pdev; + struct resource *res; + int ret; + + pdev = ar_ahb->pdev; + + ar_ahb->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(ar_ahb->mem)) { + ath10k_err(ar, "mem ioremap error\n"); + ret = PTR_ERR(ar_ahb->mem); + goto out; + } + + ar_ahb->mem_len = resource_size(res); + + ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE, + ATH10K_GCC_REG_SIZE); + if (!ar_ahb->gcc_mem) { + ath10k_err(ar, "gcc mem ioremap error\n"); + ret = -ENOMEM; + goto err_mem_unmap; + } + + ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE, + ATH10K_TCSR_REG_SIZE); + if (!ar_ahb->tcsr_mem) { + ath10k_err(ar, "tcsr mem ioremap error\n"); + ret = -ENOMEM; + goto err_gcc_mem_unmap; + } + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret); + goto err_tcsr_mem_unmap; + } + + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n", + ret); + goto err_tcsr_mem_unmap; + } + + ret = ath10k_ahb_clock_init(ar); + if (ret) + goto err_tcsr_mem_unmap; 
+ + ret = ath10k_ahb_rst_ctrl_init(ar); + if (ret) + goto err_clock_deinit; + + ar_ahb->irq = platform_get_irq_byname(pdev, "legacy"); + if (ar_ahb->irq < 0) { + ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq); + ret = ar_ahb->irq; + goto err_clock_deinit; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n", + ar_ahb->mem, ar_ahb->mem_len, + ar_ahb->gcc_mem, ar_ahb->tcsr_mem); + return 0; + +err_clock_deinit: + ath10k_ahb_clock_deinit(ar); + +err_tcsr_mem_unmap: + iounmap(ar_ahb->tcsr_mem); + +err_gcc_mem_unmap: + ar_ahb->tcsr_mem = NULL; + iounmap(ar_ahb->gcc_mem); + +err_mem_unmap: + ar_ahb->gcc_mem = NULL; + devm_iounmap(&pdev->dev, ar_ahb->mem); + +out: + ar_ahb->mem = NULL; + return ret; +} + +static void ath10k_ahb_resource_deinit(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + + dev = &ar_ahb->pdev->dev; + + if (ar_ahb->mem) + devm_iounmap(dev, ar_ahb->mem); + + if (ar_ahb->gcc_mem) + iounmap(ar_ahb->gcc_mem); + + if (ar_ahb->tcsr_mem) + iounmap(ar_ahb->tcsr_mem); + + ar_ahb->mem = NULL; + ar_ahb->gcc_mem = NULL; + ar_ahb->tcsr_mem = NULL; + + ath10k_ahb_clock_deinit(ar); + ath10k_ahb_rst_ctrl_deinit(ar); +} + +static int ath10k_ahb_prepare_device(struct ath10k *ar) +{ + u32 val; + int ret; + + ret = ath10k_ahb_clock_enable(ar); + if (ret) { + ath10k_err(ar, "failed to enable clocks\n"); + return ret; + } + + /* The clock for the target is supplied from outside the target (i.e., + * by an external clock module controlled by the host). The target + * needs to know the frequency its CPU is configured to run at, for + * its own internal use. Read the target CPU frequency info from the + * GCC register and write it into the target's scratch register, + * where the target expects this information.
+ */ + val = ath10k_ahb_gcc_read32(ar, ATH10K_AHB_GCC_FEPLL_PLL_DIV); + ath10k_ahb_write32(ar, ATH10K_AHB_WIFI_SCRATCH_5_REG, val); + + ret = ath10k_ahb_release_reset(ar); + if (ret) + goto err_clk_disable; + + ath10k_ahb_irq_disable(ar); + + ath10k_ahb_write32(ar, FW_INDICATOR_ADDRESS, FW_IND_HOST_READY); + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) + goto err_halt_chip; + + return 0; + +err_halt_chip: + ath10k_ahb_halt_chip(ar); + +err_clk_disable: + ath10k_ahb_clock_disable(ar); + + return ret; +} + +static int ath10k_ahb_chip_reset(struct ath10k *ar) +{ + int ret; + + ath10k_ahb_halt_chip(ar); + ath10k_ahb_clock_disable(ar); + + ret = ath10k_ahb_prepare_device(ar); + if (ret) + return ret; + + return 0; +} + +static int ath10k_ahb_wake_target_cpu(struct ath10k *ar) +{ + u32 addr, val; + + addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; + val = ath10k_ahb_read32(ar, addr); + val |= ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK; + ath10k_ahb_write32(ar, addr, val); + + return 0; +} + +static int ath10k_ahb_hif_start(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n"); + + ath10k_core_napi_enable(ar); + ath10k_ce_enable_interrupts(ar); + ath10k_pci_enable_intx_irq(ar); + + ath10k_pci_rx_post(ar); + + return 0; +} + +static void ath10k_ahb_hif_stop(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif stop\n"); + + ath10k_ahb_irq_disable(ar); + synchronize_irq(ar_ahb->irq); + + ath10k_core_napi_sync_disable(ar); + + ath10k_pci_flush(ar); +} + +static int ath10k_ahb_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif power up\n"); + + ret = ath10k_ahb_chip_reset(ar); + if (ret) { + ath10k_err(ar, "failed to reset chip: %d\n", ret); + goto out; + } + + ret = ath10k_pci_init_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to initialize CE: %d\n", ret); + goto out; + } + + ret = ath10k_pci_init_config(ar); + if (ret) { + ath10k_err(ar, "failed to setup init config: %d\n", ret); + goto err_ce_deinit; + } + + ret = ath10k_ahb_wake_target_cpu(ar); + if (ret) { + ath10k_err(ar, "could not wake up target CPU: %d\n", ret); + goto err_ce_deinit; + } + + return 0; + +err_ce_deinit: + ath10k_pci_ce_deinit(ar); +out: + return ret; +} + +static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) +{ + u32 val = 0, region = addr & 0xfffff; + + val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); + + if (region >= QCA4019_SRAM_ADDR && region <= + (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) { + /* SRAM contents for QCA4019 can be directly accessed and + * no conversions are required + */ + val |= region; + } else { + val |= 0x100000 | region; + } + + return val; +} + +static const struct ath10k_hif_ops ath10k_ahb_hif_ops = { + .tx_sg = ath10k_pci_hif_tx_sg, + .diag_read = ath10k_pci_hif_diag_read, + .diag_write = ath10k_pci_diag_write_mem, + .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, + .start = ath10k_ahb_hif_start, + .stop = ath10k_ahb_hif_stop, + .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, + .get_default_pipe = ath10k_pci_hif_get_default_pipe, + .send_complete_check = ath10k_pci_hif_send_complete_check, + .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, + .power_up = ath10k_ahb_hif_power_up, + .power_down = ath10k_pci_hif_power_down, + .read32 = ath10k_ahb_read32, + .write32 = ath10k_ahb_write32, +}; + +static const struct ath10k_bus_ops ath10k_ahb_bus_ops = { + .read32 = 
ath10k_ahb_read32, + .write32 = ath10k_ahb_write32, + .get_num_banks = ath10k_ahb_get_num_banks, +}; + +static int ath10k_ahb_probe(struct platform_device *pdev) +{ + struct ath10k *ar; + struct ath10k_ahb *ar_ahb; + struct ath10k_pci *ar_pci; + enum ath10k_hw_rev hw_rev; + size_t size; + int ret; + struct ath10k_bus_params bus_params = {}; + + hw_rev = (uintptr_t)of_device_get_match_data(&pdev->dev); + if (!hw_rev) { + dev_err(&pdev->dev, "OF data missing\n"); + return -EINVAL; + } + + size = sizeof(*ar_pci) + sizeof(*ar_ahb); + ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB, + hw_rev, &ath10k_ahb_hif_ops); + if (!ar) { + dev_err(&pdev->dev, "failed to allocate core\n"); + return -ENOMEM; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "ahb probe\n"); + + ar_pci = ath10k_pci_priv(ar); + ar_ahb = ath10k_ahb_priv(ar); + + ar_ahb->pdev = pdev; + platform_set_drvdata(pdev, ar); + + ret = ath10k_ahb_resource_init(ar); + if (ret) + goto err_core_destroy; + + ar->dev_id = 0; + ar_pci->mem = ar_ahb->mem; + ar_pci->mem_len = ar_ahb->mem_len; + ar_pci->ar = ar; + ar_pci->ce.bus_ops = &ath10k_ahb_bus_ops; + ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr; + ar->ce_priv = &ar_pci->ce; + + ret = ath10k_pci_setup_resource(ar); + if (ret) { + ath10k_err(ar, "failed to setup resource: %d\n", ret); + goto err_resource_deinit; + } + + ath10k_pci_init_napi(ar); + + ret = ath10k_ahb_request_irq_intx(ar); + if (ret) + goto err_free_pipes; + + ret = ath10k_ahb_prepare_device(ar); + if (ret) + goto err_free_irq; + + ath10k_pci_ce_deinit(ar); + + bus_params.dev_type = ATH10K_DEV_TYPE_LL; + bus_params.chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + if (bus_params.chip_id == 0xffffffff) { + ath10k_err(ar, "failed to get chip id\n"); + ret = -ENODEV; + goto err_halt_device; + } + + ret = ath10k_core_register(ar, &bus_params); + if (ret) { + ath10k_err(ar, "failed to register driver core: %d\n", ret); + goto err_halt_device; + } + + return 0; + +err_halt_device: + ath10k_ahb_halt_chip(ar); + ath10k_ahb_clock_disable(ar); + +err_free_irq: + ath10k_ahb_release_irq_intx(ar); + +err_free_pipes: + ath10k_pci_release_resource(ar); + +err_resource_deinit: + ath10k_ahb_resource_deinit(ar); + +err_core_destroy: + ath10k_core_destroy(ar); + + return ret; +} + +static void ath10k_ahb_remove(struct platform_device *pdev) +{ + struct ath10k *ar = platform_get_drvdata(pdev); + + ath10k_dbg(ar, ATH10K_DBG_AHB, "ahb remove\n"); + + ath10k_core_unregister(ar); + ath10k_ahb_irq_disable(ar); + ath10k_ahb_release_irq_intx(ar); + ath10k_pci_release_resource(ar); + ath10k_ahb_halt_chip(ar); + ath10k_ahb_clock_disable(ar); + ath10k_ahb_resource_deinit(ar); + ath10k_core_destroy(ar); +} + +static struct platform_driver ath10k_ahb_driver = { + .driver = { + .name = "ath10k_ahb", + .of_match_table = ath10k_ahb_of_match, + }, + .probe = ath10k_ahb_probe, + .remove = ath10k_ahb_remove, +}; + +int ath10k_ahb_init(void) +{ + int ret; + + ret = platform_driver_register(&ath10k_ahb_driver); + if (ret) + printk(KERN_ERR "failed to register ath10k ahb driver: %d\n", + ret); + return ret; +} + +void ath10k_ahb_exit(void) +{ + platform_driver_unregister(&ath10k_ahb_driver); +} diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h new file mode 100644 index 000000000000..cee11a3ae2a5 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved. 
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved. + */ + +#ifndef _AHB_H_ +#define _AHB_H_ + +#include <linux/platform_device.h> + +struct ath10k_ahb { + struct platform_device *pdev; + void __iomem *mem; + unsigned long mem_len; + void __iomem *gcc_mem; + void __iomem *tcsr_mem; + + int irq; + + struct clk *cmd_clk; + struct clk *ref_clk; + struct clk *rtc_clk; + + struct reset_control *core_cold_rst; + struct reset_control *radio_cold_rst; + struct reset_control *radio_warm_rst; + struct reset_control *radio_srif_rst; + struct reset_control *cpu_init_rst; +}; + +#ifdef CONFIG_ATH10K_AHB + +#define ATH10K_GCC_REG_BASE 0x1800000 +#define ATH10K_GCC_REG_SIZE 0x60000 + +#define ATH10K_TCSR_REG_BASE 0x1900000 +#define ATH10K_TCSR_REG_SIZE 0x80000 + +#define ATH10K_AHB_GCC_FEPLL_PLL_DIV 0x2f020 +#define ATH10K_AHB_WIFI_SCRATCH_5_REG 0x4f014 + +#define ATH10K_AHB_WLAN_CORE_ID_REG 0x82030 + +#define ATH10K_AHB_TCSR_WIFI0_GLB_CFG 0x49000 +#define ATH10K_AHB_TCSR_WIFI1_GLB_CFG 0x49004 +#define TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK BIT(25) + +#define ATH10K_AHB_TCSR_WCSS0_HALTREQ 0x52000 +#define ATH10K_AHB_TCSR_WCSS1_HALTREQ 0x52010 +#define ATH10K_AHB_TCSR_WCSS0_HALTACK 0x52004 +#define ATH10K_AHB_TCSR_WCSS1_HALTACK 0x52014 + +#define ATH10K_AHB_AXI_BUS_HALT_TIMEOUT 10 /* msec */ +#define AHB_AXI_BUS_HALT_REQ 1 +#define AHB_AXI_BUS_HALT_ACK 1 + +#define ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK 1 + +int ath10k_ahb_init(void); +void ath10k_ahb_exit(void); + +#else /* CONFIG_ATH10K_AHB */ + +static inline int ath10k_ahb_init(void) +{ + return 0; +} + +static inline void ath10k_ahb_exit(void) +{ +} + +#endif /* CONFIG_ATH10K_AHB */ + +#endif /* _AHB_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index 1a2ef51b69d9..52118867ecde 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -1,24 +1,25 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ +#include <linux/export.h> #include "bmi.h" #include "hif.h" #include "debug.h" #include "htc.h" +#include "hw.h" + +void ath10k_bmi_start(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n"); + + ar->bmi.done_sent = false; +} +EXPORT_SYMBOL(ath10k_bmi_start); int ath10k_bmi_done(struct ath10k *ar) { @@ -26,8 +27,10 @@ int ath10k_bmi_done(struct ath10k *ar) u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done); int ret; + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n"); + if (ar->bmi.done_sent) { - ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n"); return 0; } @@ -36,11 +39,10 @@ int ath10k_bmi_done(struct ath10k *ar) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); if (ret) { - ath10k_warn("unable to write to the device: %d\n", ret); + ath10k_warn(ar, "unable to write to the device: %d\n", ret); return ret; } - ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n"); return 0; } @@ -53,8 +55,10 @@ int ath10k_bmi_get_target_info(struct ath10k *ar, u32 resplen = sizeof(resp.get_target_info); int ret; + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n"); + if (ar->bmi.done_sent) { - ath10k_warn("BMI Get Target Info Command disallowed\n"); + ath10k_warn(ar, "BMI Get Target Info Command disallowed\n"); return -EBUSY; } @@ -62,18 +66,90 @@ int ath10k_bmi_get_target_info(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); if (ret) { - ath10k_warn("unable to get target info from device\n"); + ath10k_warn(ar, "unable to get target info from device\n"); return ret; } if (resplen < sizeof(resp.get_target_info)) { - ath10k_warn("invalid get_target_info response length (%d)\n", + ath10k_warn(ar, "invalid get_target_info response length (%d)\n", resplen); return -EIO; } target_info->version = __le32_to_cpu(resp.get_target_info.version); target_info->type = __le32_to_cpu(resp.get_target_info.type); + + return 0; +} + +#define TARGET_VERSION_SENTINAL 0xffffffffu + +int ath10k_bmi_get_target_info_sdio(struct ath10k *ar, + struct bmi_target_info *target_info) +{ + struct bmi_cmd cmd; + union bmi_resp resp; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info); + u32 resplen, ver_len; + __le32 tmp; + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n"); + + if (ar->bmi.done_sent) { + ath10k_warn(ar, "BMI Get Target Info Command disallowed\n"); + return -EBUSY; + } + + cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO); + + /* Step 1: Read 4 bytes of the target info and check if it is + * the special sentinel version word or the first word in the + * version response. + */ + resplen = sizeof(u32); + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen); + if (ret) { + ath10k_warn(ar, "unable to read from device\n"); + return ret; + } + + /* Some SDIO boards have a special sentinel byte before the real + * version response. + */ + if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) { + /* Step 1b: Read the version length */ + resplen = sizeof(u32); + ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp, + &resplen); + if (ret) { + ath10k_warn(ar, "unable to read from device\n"); + return ret; + } + } + + ver_len = __le32_to_cpu(tmp); + + /* Step 2: Check the target info length */ + if (ver_len != sizeof(resp.get_target_info)) { + ath10k_warn(ar, "Unexpected target info len: %u. 
Expected: %zu\n", + ver_len, sizeof(resp.get_target_info)); + return -EINVAL; + } + + /* Step 3: Read the rest of the version response */ + resplen = sizeof(resp.get_target_info) - sizeof(u32); + ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, + &resp.get_target_info.version, + &resplen); + if (ret) { + ath10k_warn(ar, "unable to read from device\n"); + return ret; + } + + target_info->version = __le32_to_cpu(resp.get_target_info.version); + target_info->type = __le32_to_cpu(resp.get_target_info.type); + return 0; } @@ -86,15 +162,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar, u32 rxlen; int ret; + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n", + address, length); + if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } - ath10k_dbg(ATH10K_DBG_CORE, - "%s: (device: 0x%p, address: 0x%x, length: %d)\n", - __func__, ar, address, length); - while (length) { rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE); @@ -105,7 +180,8 @@ int ath10k_bmi_read_memory(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &rxlen); if (ret) { - ath10k_warn("unable to read from the device\n"); + ath10k_warn(ar, "unable to read from the device (%d)\n", + ret); return ret; } @@ -117,6 +193,70 @@ int ath10k_bmi_read_memory(struct ath10k *ar, return 0; } +EXPORT_SYMBOL(ath10k_bmi_read_memory); + +int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val) +{ + struct bmi_cmd cmd; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg); + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BMI, + "bmi write soc register 0x%08x val 0x%08x\n", + address, reg_val); + + if (ar->bmi.done_sent) { + ath10k_warn(ar, "bmi write soc register command in progress\n"); + return -EBUSY; + } + + cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER); + cmd.write_soc_reg.addr = __cpu_to_le32(address); + cmd.write_soc_reg.value = __cpu_to_le32(reg_val); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); + if (ret) { + ath10k_warn(ar, "Unable to write soc register to device: %d\n", + ret); + return ret; + } + + return 0; +} + +int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val) +{ + struct bmi_cmd cmd; + union bmi_resp resp; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg); + u32 resplen = sizeof(resp.read_soc_reg); + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n", + address); + + if (ar->bmi.done_sent) { + ath10k_warn(ar, "bmi read soc register command in progress\n"); + return -EBUSY; + } + + cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER); + cmd.read_soc_reg.addr = __cpu_to_le32(address); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); + if (ret) { + ath10k_warn(ar, "Unable to read soc register from device: %d\n", + ret); + return ret; + } + + *reg_val = __le32_to_cpu(resp.read_soc_reg.value); + + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n", + *reg_val); + + return 0; +} int ath10k_bmi_write_memory(struct ath10k *ar, u32 address, const void *buffer, u32 length) @@ -126,15 +266,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 txlen; int ret; + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n", + address, length); + if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } - ath10k_dbg(ATH10K_DBG_CORE, - "%s: (device: 0x%p, address: 0x%x, length: %d)\n", - __func__, ar, address, length); - while (length) { 
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen); @@ -149,7 +288,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, NULL, NULL); if (ret) { - ath10k_warn("unable to write to the device\n"); + ath10k_warn(ar, "unable to write to the device (%d)\n", + ret); return ret; } @@ -164,7 +304,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, return 0; } -int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param) +int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result) { struct bmi_cmd cmd; union bmi_resp resp; @@ -172,32 +312,81 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param) u32 resplen = sizeof(resp.execute); int ret; + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n", + address, param); + if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } - ath10k_dbg(ATH10K_DBG_CORE, - "%s: (device: 0x%p, address: 0x%x, param: %d)\n", - __func__, ar, address, *param); - cmd.id = __cpu_to_le32(BMI_EXECUTE); cmd.execute.addr = __cpu_to_le32(address); - cmd.execute.param = __cpu_to_le32(*param); + cmd.execute.param = __cpu_to_le32(param); ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); if (ret) { - ath10k_warn("unable to read from the device\n"); + ath10k_warn(ar, "unable to read from the device\n"); return ret; } if (resplen < sizeof(resp.execute)) { - ath10k_warn("invalid execute response length (%d)\n", + ath10k_warn(ar, "invalid execute response length (%d)\n", resplen); - return ret; + return -EIO; } - *param = __le32_to_cpu(resp.execute.result); + *result = __le32_to_cpu(resp.execute.result); + + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result); + + return 0; +} + +static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length) +{ + struct bmi_cmd *cmd; + u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data); + u32 txlen; + int ret; + size_t buf_len; + + ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%p length %d\n", + buffer, length); + + if (ar->bmi.done_sent) { + ath10k_warn(ar, "command disallowed\n"); + return -EBUSY; + } + + buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE; + cmd = kzalloc(buf_len, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + while (length) { + txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen); + + WARN_ON_ONCE(txlen & 3); + + cmd->id = __cpu_to_le32(BMI_LZ_DATA); + cmd->lz_data.len = __cpu_to_le32(txlen); + memcpy(cmd->lz_data.payload, buffer, txlen); + + ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen, + NULL, NULL); + if (ret) { + ath10k_warn(ar, "unable to write to the device\n"); + kfree(cmd); + return ret; + } + + buffer += txlen; + length -= txlen; + } + + kfree(cmd); + return 0; } @@ -208,8 +397,11 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length) u32 txlen; int ret; + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n", + buffer, length); + if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -225,7 +417,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, NULL, NULL); if (ret) { - ath10k_warn("unable to write to the device\n"); + ath10k_warn(ar, "unable to write to the device\n"); return ret; } @@ -242,8 +434,11 @@ int ath10k_bmi_lz_stream_start(struct ath10k 
*ar, u32 address) u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start); int ret; + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n", + address); + if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -252,7 +447,7 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); if (ret) { - ath10k_warn("unable to Start LZ Stream to the device\n"); + ath10k_warn(ar, "unable to Start LZ Stream to the device\n"); return ret; } @@ -267,6 +462,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar, u32 trailer_len = length - head_len; int ret; + ath10k_dbg(ar, ATH10K_DBG_BMI, + "bmi fast download address 0x%x buffer 0x%p length %d\n", + address, buffer, length); + ret = ath10k_bmi_lz_stream_start(ar, address); if (ret) return ret; @@ -275,7 +474,11 @@ int ath10k_bmi_fast_download(struct ath10k *ar, if (trailer_len > 0) memcpy(trailer, buffer + head_len, trailer_len); - ret = ath10k_bmi_lz_data(ar, buffer, head_len); + if (ar->hw_params.bmi_large_size_download) + ret = ath10k_bmi_lz_data_large(ar, buffer, head_len); + else + ret = ath10k_bmi_lz_data(ar, buffer, head_len); + if (ret) return ret; @@ -293,3 +496,26 @@ int ath10k_bmi_fast_download(struct ath10k *ar, return ret; } + +int ath10k_bmi_set_start(struct ath10k *ar, u32 address) +{ + struct bmi_cmd cmd; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start); + int ret; + + if (ar->bmi.done_sent) { + ath10k_warn(ar, "bmi set start command disallowed\n"); + return -EBUSY; + } + + cmd.id = __cpu_to_le32(BMI_SET_APP_START); + cmd.set_app_start.addr = __cpu_to_le32(address); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); + if (ret) { + ath10k_warn(ar, "unable to set start to the device:%d\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index 32c56aa33a5e..0685c0d2d4ea 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc. 
*/ #ifndef _BMI_H_ @@ -56,6 +45,15 @@ sizeof(u32) + \ sizeof(u32)) +/* Maximum data size used for large BMI transfers */ +#define BMI_MAX_LARGE_DATA_SIZE 2048 + +/* len = cmd + addr + length */ +#define BMI_MAX_LARGE_CMDBUF_SIZE (BMI_MAX_LARGE_DATA_SIZE + \ + sizeof(u32) + \ + sizeof(u32) + \ + sizeof(u32)) + /* BMI Commands */ enum bmi_cmd_id { @@ -82,6 +80,23 @@ enum bmi_cmd_id { #define BMI_NVRAM_SEG_NAME_SZ 16 +#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10 +#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000 +#define BMI_PARAM_FLASH_SECTION_ALL 0x10000 + +/* Dual-band Extended Board ID */ +#define BMI_PARAM_GET_EXT_BOARD_ID 0x40000 +#define ATH10K_BMI_EXT_BOARD_ID_SUPPORT 0x40000 + +#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00 +#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10 + +#define ATH10K_BMI_CHIP_ID_FROM_OTP_MASK 0x18000 +#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB 15 + +#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff +#define ATH10K_BMI_EBOARD_ID_STATUS_MASK 0xff + struct bmi_cmd { __le32 id; /* enum bmi_cmd_id */ union { @@ -94,7 +109,7 @@ struct bmi_cmd { struct { __le32 addr; __le32 len; - u8 payload[0]; + u8 payload[]; } write_mem; struct { __le32 addr; @@ -123,18 +138,18 @@ struct bmi_cmd { } rompatch_uninstall; struct { __le32 count; - __le32 patch_ids[0]; /* length of @count */ + __le32 patch_ids[]; /* length of @count */ } rompatch_activate; struct { __le32 count; - __le32 patch_ids[0]; /* length of @count */ + __le32 patch_ids[]; /* length of @count */ } rompatch_deactivate; struct { __le32 addr; } lz_start; struct { __le32 len; /* max BMI_MAX_DATA_SIZE */ - u8 payload[0]; /* length of @len */ + u8 payload[]; /* length of @len */ } lz_data; struct { u8 name[BMI_NVRAM_SEG_NAME_SZ]; @@ -145,7 +160,7 @@ struct bmi_cmd { union bmi_resp { struct { - u8 payload[0]; + DECLARE_FLEX_ARRAY(u8, payload); } read_mem; struct { __le32 result; @@ -166,7 +181,8 @@ union bmi_resp { } rompatch_uninstall; struct { /* 0 = nothing executed - * otherwise = NVRAM segment return value */ + * otherwise = NVRAM segment return value + */ __le32 result; } nvram_process; u8 payload[BMI_MAX_CMDBUF_SIZE]; @@ -177,16 +193,47 @@ struct bmi_target_info { u32 type; }; +struct bmi_segmented_file_header { + __le32 magic_num; + __le32 file_flags; + u8 data[]; +}; + +struct bmi_segmented_metadata { + __le32 addr; + __le32 length; + u8 data[]; +}; + +#define BMI_SGMTFILE_MAGIC_NUM 0x544d4753 /* "SGMT" */ +#define BMI_SGMTFILE_FLAG_COMPRESS 1 + +/* Special values for bmi_segmented_metadata.length (all have high bit set) */ -/* in msec */ -#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ) +/* end of segmented data */ +#define BMI_SGMTFILE_DONE 0xffffffff + +/* Board Data segment */ +#define BMI_SGMTFILE_BDDATA 0xfffffffe + +/* set beginning address */ +#define BMI_SGMTFILE_BEGINADDR 0xfffffffd + +/* immediate function execution */ +#define BMI_SGMTFILE_EXEC 0xfffffffc + +/* in jiffies */ +#define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ) #define BMI_CE_NUM_TO_TARG 0 #define BMI_CE_NUM_TO_HOST 1 +void ath10k_bmi_start(struct ath10k *ar); int ath10k_bmi_done(struct ath10k *ar); int ath10k_bmi_get_target_info(struct ath10k *ar, struct bmi_target_info *target_info); +int ath10k_bmi_get_target_info_sdio(struct ath10k *ar, + struct bmi_target_info *target_info); int ath10k_bmi_read_memory(struct ath10k *ar, u32 address, void *buffer, u32 length); int ath10k_bmi_write_memory(struct ath10k *ar, u32 address, @@ -200,7 +247,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address, \ addr = host_interest_item_address(HI_ITEM(item)); \ ret = 
ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \ - *val = __le32_to_cpu(tmp); \ + if (!ret) \ + *val = __le32_to_cpu(tmp); \ ret; \ }) @@ -216,9 +264,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address, ret; \ }) -int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param); +int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result); int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address); int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length); + int ath10k_bmi_fast_download(struct ath10k *ar, u32 address, const void *buffer, u32 length); +int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val); +int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val); +int ath10k_bmi_set_start(struct ath10k *ar, u32 address); + #endif /* _BMI_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 61a8ac70d3ca..7bbda46cfd93 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1,22 +1,14 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include <linux/export.h> #include "hif.h" -#include "pci.h" #include "ce.h" #include "debug.h" @@ -33,13 +25,13 @@ * Each ring consists of a number of descriptors which specify * an address, length, and meta-data. * - * Typically, one side of the PCIe interconnect (Host or Target) + * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target) * controls one ring and the other side controls the other ring. * The source side chooses when to initiate a transfer and it * chooses what to send (buffer address, length). The destination * side keeps a supply of "anonymous receive buffers" available and * it handles incoming data as it arrives (when the destination - * recieves an interrupt). + * receives an interrupt). * * The sender may send a simple buffer (address/length) or it may * send a small list of buffers. When a small list is sent, hardware @@ -59,242 +51,386 @@ * the buffer is sent/received. 
*/ +static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state) +{ + u32 ce_id = ce_state->id; + u32 addr = 0; + + switch (ce_id) { + case 0: + addr = 0x00032000; + break; + case 3: + addr = 0x0003200C; + break; + case 4: + addr = 0x00032010; + break; + case 5: + addr = 0x00032014; + break; + case 7: + addr = 0x0003201C; + break; + default: + ath10k_warn(ar, "invalid CE id: %d", ce_id); + break; + } + return addr; +} + +static inline unsigned int +ath10k_set_ring_byte(unsigned int offset, + const struct ath10k_hw_ce_regs_addr_map *addr_map) +{ + return ((offset << addr_map->lsb) & addr_map->mask); +} + +static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return ce->bus_ops->read32(ar, offset); +} + +static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + ce->bus_ops->write32(ar, offset, value); +} + static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dst_wr_index_addr, n); } static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS); + return ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dst_wr_index_addr); } static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - void __iomem *indicator_addr; - - if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) { - ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); - return; - } - - /* workaround for QCA988x_1.0 HW CE */ - indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS; - - if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) { - iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr); - } else { - unsigned long irq_flags; - local_irq_save(irq_flags); - iowrite32(1, indicator_addr); - - /* - * PCIE write waits for ACK in IPQ8K, there is no - * need to read back value. 
- */ - (void)ioread32(indicator_addr); - (void)ioread32(indicator_addr); /* conservative */ - - ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); - - iowrite32(0, indicator_addr); - local_irq_restore(irq_flags); - } + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_wr_index_addr, n); } static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS); + return ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_wr_index_addr); +} + +static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar, + u32 ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK; } static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 index; + + if (ar->hw_params.rri_on_ddr && + (ce_state->attr_flags & CE_ATTR_DIS_INTR)) + index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id); + else + index = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_srri_addr); + + return index; +} + +static inline void +ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state, + unsigned int value) +{ + ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value); } static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, - u32 ce_ctrl_addr, - unsigned int addr) + u32 ce_id, + u64 addr) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id); + u32 addr_lo = lower_32_bits(addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_base_addr_lo, addr_lo); + + if (ce_state->ops->ce_set_src_ring_base_addr_hi) { + ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr, + addr); + } +} + +static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr) { - ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr); + u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK; + + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_base_addr_hi, addr_hi); } static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_size_addr, n); } static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 ctrl1_addr = ath10k_pci_read32((ar), - (ce_ctrl_addr) + CE_CTRL1_ADDRESS); + const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; + + u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ctrl_regs->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, - (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) | - CE_CTRL1_DMAX_LENGTH_SET(n)); + ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->dmax->mask)) | + ath10k_set_ring_byte(n, ctrl_regs->dmax)); } static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); + const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; - ath10k_pci_write32(ar, 
ce_ctrl_addr + CE_CTRL1_ADDRESS, - (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) | - CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n)); + u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ctrl_regs->addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) | + ath10k_set_ring_byte(n, ctrl_regs->src_ring)); } static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); + const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; + + u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ctrl_regs->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, - (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) | - CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n)); + ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) | + ath10k_set_ring_byte(n, ctrl_regs->dst_ring)); +} + +static inline + u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) & + CE_DDR_RRI_MASK; } static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 index; + + if (ar->hw_params.rri_on_ddr && + (ce_state->attr_flags & CE_ATTR_DIS_INTR)) + index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id); + else + index = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_drri_addr); + + return index; } static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, - u32 ce_ctrl_addr, - u32 addr) + u32 ce_id, + u64 addr) { - ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id); + u32 addr_lo = lower_32_bits(addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr_lo, addr_lo); + + if (ce_state->ops->ce_set_dest_ring_base_addr_hi) { + ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr, + addr); + } +} + +static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr) +{ + u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK; + u32 reg_value; + + reg_value = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr_hi); + reg_value &= ~CE_DESC_ADDR_HI_MASK; + reg_value |= addr_hi; + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr_hi, reg_value); } static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_size_addr, n); } static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); + const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, - (addr & ~SRC_WATERMARK_HIGH_MASK) | - SRC_WATERMARK_HIGH_SET(n)); + 
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, + (addr & ~(srcr_wm->wm_high->mask)) | + (ath10k_set_ring_byte(n, srcr_wm->wm_high))); } static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); + const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, - (addr & ~SRC_WATERMARK_LOW_MASK) | - SRC_WATERMARK_LOW_SET(n)); + ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, + (addr & ~(srcr_wm->wm_low->mask)) | + (ath10k_set_ring_byte(n, srcr_wm->wm_low))); } static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); + const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, - (addr & ~DST_WATERMARK_HIGH_MASK) | - DST_WATERMARK_HIGH_SET(n)); + ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, + (addr & ~(dstr_wm->wm_high->mask)) | + (ath10k_set_ring_byte(n, dstr_wm->wm_high))); } static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); + const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, - (addr & ~DST_WATERMARK_LOW_MASK) | - DST_WATERMARK_LOW_SET(n)); + ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, + (addr & ~(dstr_wm->wm_low->mask)) | + (ath10k_set_ring_byte(n, dstr_wm->wm_low))); } static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 host_ie_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + HOST_IE_ADDRESS); + const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; + + u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->host_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, - host_ie_addr | HOST_IE_COPY_COMPLETE_MASK); + ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + host_ie_addr | host_ie->copy_complete->mask); } static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 host_ie_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + HOST_IE_ADDRESS); + const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; - ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, - host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK); + u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->host_ie_addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + host_ie_addr & ~(host_ie->copy_complete->mask)); } static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - u32 host_ie_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + HOST_IE_ADDRESS); + const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; + + u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->host_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, - host_ie_addr & ~CE_WATERMARK_MASK); + ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, 
+ host_ie_addr & ~(wm_regs->wm_mask)); } -static inline void ath10k_ce_error_intr_enable(struct ath10k *ar, - u32 ce_ctrl_addr) +static inline void ath10k_ce_error_intr_disable(struct ath10k *ar, + u32 ce_ctrl_addr) { - u32 misc_ie_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + MISC_IE_ADDRESS); + const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; - ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, - misc_ie_addr | CE_ERROR_MASK); + u32 misc_ie_addr = ath10k_ce_read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr); + + ath10k_ce_write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, + misc_ie_addr & ~(misc_regs->err_mask)); } static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int mask) { - ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask); -} + const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; + ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask); +} /* - * Guts of ath10k_ce_send, used by both ath10k_ce_send and - * ath10k_ce_sendlist_send. + * Guts of ath10k_ce_send. * The caller takes responsibility for any needed locking. */ -static int ath10k_ce_send_nolock(struct ce_state *ce_state, - void *per_transfer_context, - u32 buffer, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags) +static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, + void *per_transfer_context, + dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags) { struct ath10k *ar = ce_state->ar; - struct ce_ring_state *src_ring = ce_state->src_ring; - struct ce_desc *desc, *sdesc; + struct ath10k_ce_ring *src_ring = ce_state->src_ring; + struct ce_desc *desc, sdesc; unsigned int nentries_mask = src_ring->nentries_mask; unsigned int sw_index = src_ring->sw_index; unsigned int write_index = src_ring->write_index; @@ -303,20 +439,17 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state, int ret = 0; if (nbytes > ce_state->src_sz_max) - ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n", + ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n", __func__, nbytes, ce_state->src_sz_max); - ath10k_pci_wake(ar); - if (unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) <= 0)) { - ret = -EIO; + ret = -ENOSR; goto exit; } desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space, write_index); - sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index); desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA); @@ -325,11 +458,11 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state, if (flags & CE_SEND_FLAG_BYTE_SWAP) desc_flags |= CE_DESC_FLAGS_BYTE_SWAP; - sdesc->addr = __cpu_to_le32(buffer); - sdesc->nbytes = __cpu_to_le16(nbytes); - sdesc->flags = __cpu_to_le16(desc_flags); + sdesc.addr = __cpu_to_le32(buffer); + sdesc.nbytes = __cpu_to_le16(nbytes); + sdesc.flags = __cpu_to_le16(desc_flags); - *desc = *sdesc; + *desc = sdesc; src_ring->per_transfer_context[write_index] = per_transfer_context; @@ -342,151 +475,290 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state, src_ring->write_index = write_index; exit: - ath10k_pci_sleep(ar); return ret; } -int ath10k_ce_send(struct ce_state *ce_state, +static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, + void *per_transfer_context, + dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_ce_ring *src_ring = ce_state->src_ring; + 
struct ce_desc_64 *desc, sdesc; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index; + unsigned int write_index = src_ring->write_index; + u32 ctrl_addr = ce_state->ctrl_addr; + __le32 *addr; + u32 desc_flags = 0; + int ret = 0; + + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + return -ESHUTDOWN; + + if (nbytes > ce_state->src_sz_max) + ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n", + __func__, nbytes, ce_state->src_sz_max); + + if (ar->hw_params.rri_on_ddr) + sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id); + else + sw_index = src_ring->sw_index; + + if (unlikely(CE_RING_DELTA(nentries_mask, + write_index, sw_index - 1) <= 0)) { + ret = -ENOSR; + goto exit; + } + + desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space, + write_index); + + desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA); + + if (flags & CE_SEND_FLAG_GATHER) + desc_flags |= CE_DESC_FLAGS_GATHER; + + if (flags & CE_SEND_FLAG_BYTE_SWAP) + desc_flags |= CE_DESC_FLAGS_BYTE_SWAP; + + addr = (__le32 *)&sdesc.addr; + + flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK; + addr[0] = __cpu_to_le32(buffer); + addr[1] = __cpu_to_le32(flags); + if (flags & CE_SEND_FLAG_GATHER) + addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER); + else + addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER)); + + sdesc.nbytes = __cpu_to_le16(nbytes); + sdesc.flags = __cpu_to_le16(desc_flags); + + *desc = sdesc; + + src_ring->per_transfer_context[write_index] = per_transfer_context; + + /* Update Source Ring Write Index */ + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + + if (!(flags & CE_SEND_FLAG_GATHER)) { + if (ar->hw_params.shadow_reg_support) + ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, + write_index); + else + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, + write_index); + } + + src_ring->write_index = write_index; +exit: + return ret; +} + +int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, + void *per_transfer_context, + dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags) +{ + return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context, + buffer, nbytes, transfer_id, flags); +} +EXPORT_SYMBOL(ath10k_ce_send_nolock); + +void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) +{ + struct ath10k *ar = pipe->ar; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_ring *src_ring = pipe->src_ring; + u32 ctrl_addr = pipe->ctrl_addr; + + lockdep_assert_held(&ce->ce_lock); + + /* + * This function must be called only if there is an incomplete + * scatter-gather transfer (before index register is updated) + * that needs to be cleaned up. 
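+ *
+ * Editor's note: a minimal usage sketch, assuming ce->ce_lock is
+ * held; the fragment names are hypothetical, not a call site from
+ * this driver. Every fragment except the last is queued with
+ * CE_SEND_FLAG_GATHER, and each revert call unwinds exactly one
+ * queued descriptor (write_index is walked back by one):
+ *
+ *   ath10k_ce_send_nolock(pipe, NULL, frag_paddr, frag_len,
+ *                         xfer_id, CE_SEND_FLAG_GATHER);
+ *   ret = ath10k_ce_send_nolock(pipe, ctx, last_paddr, last_len,
+ *                               xfer_id, 0);
+ *   if (ret)
+ *           __ath10k_ce_send_revert(pipe);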
+ */ + if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index)) + return; + + if (WARN_ON_ONCE(src_ring->write_index == + ath10k_ce_src_ring_write_index_get(ar, ctrl_addr))) + return; + + src_ring->write_index--; + src_ring->write_index &= src_ring->nentries_mask; + + src_ring->per_transfer_context[src_ring->write_index] = NULL; +} +EXPORT_SYMBOL(__ath10k_ce_send_revert); + +int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, void *per_transfer_context, - u32 buffer, + dma_addr_t buffer, unsigned int nbytes, unsigned int transfer_id, unsigned int flags) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, buffer, nbytes, transfer_id, flags); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } +EXPORT_SYMBOL(ath10k_ce_send); -void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer, - unsigned int nbytes, u32 flags) +int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) { - unsigned int num_items = sendlist->num_items; - struct ce_sendlist_item *item; + struct ath10k *ar = pipe->ar; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + int delta; + + spin_lock_bh(&ce->ce_lock); + delta = CE_RING_DELTA(pipe->src_ring->nentries_mask, + pipe->src_ring->write_index, + pipe->src_ring->sw_index - 1); + spin_unlock_bh(&ce->ce_lock); - item = &sendlist->item[num_items]; - item->data = buffer; - item->u.nbytes = nbytes; - item->flags = flags; - sendlist->num_items++; + return delta; } +EXPORT_SYMBOL(ath10k_ce_num_free_src_entries); -int ath10k_ce_sendlist_send(struct ce_state *ce_state, - void *per_transfer_context, - struct ce_sendlist *sendlist, - unsigned int transfer_id) +int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) { - struct ce_ring_state *src_ring = ce_state->src_ring; - struct ce_sendlist_item *item; - struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - unsigned int nentries_mask = src_ring->nentries_mask; - unsigned int num_items = sendlist->num_items; - unsigned int sw_index; - unsigned int write_index; - int i, delta, ret = -ENOMEM; + struct ath10k *ar = pipe->ar; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index = dest_ring->write_index; + unsigned int sw_index = dest_ring->sw_index; - spin_lock_bh(&ar_pci->ce_lock); + lockdep_assert_held(&ce->ce_lock); - sw_index = src_ring->sw_index; - write_index = src_ring->write_index; + return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); +} +EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs); + +static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr) +{ + struct ath10k *ar = pipe->ar; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index = dest_ring->write_index; + unsigned int sw_index = dest_ring->sw_index; + struct ce_desc *base = dest_ring->base_addr_owner_space; + struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); + u32 ctrl_addr = pipe->ctrl_addr; - delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); + lockdep_assert_held(&ce->ce_lock); - if (delta >= num_items) { - /* - * Handle all but the last item 
uniformly. - */ - for (i = 0; i < num_items - 1; i++) { - item = &sendlist->item[i]; - ret = ath10k_ce_send_nolock(ce_state, - CE_SENDLIST_ITEM_CTXT, - (u32) item->data, - item->u.nbytes, transfer_id, - item->flags | - CE_SEND_FLAG_GATHER); - if (ret) - ath10k_warn("CE send failed for item: %d\n", i); - } - /* - * Provide valid context pointer for final item. - */ - item = &sendlist->item[i]; - ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, - (u32) item->data, item->u.nbytes, - transfer_id, item->flags); - if (ret) - ath10k_warn("CE send failed for last item: %d\n", i); - } + if ((pipe->id != 5) && + CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) + return -ENOSPC; - spin_unlock_bh(&ar_pci->ce_lock); + desc->addr = __cpu_to_le32(paddr); + desc->nbytes = 0; - return ret; + dest_ring->per_transfer_context[write_index] = ctx; + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); + dest_ring->write_index = write_index; + + return 0; } -int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state, - void *per_recv_context, - u32 buffer) +static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe, + void *ctx, + dma_addr_t paddr) { - struct ce_ring_state *dest_ring = ce_state->dest_ring; - u32 ctrl_addr = ce_state->ctrl_addr; - struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k *ar = pipe->ar; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; - unsigned int write_index; - unsigned int sw_index; - int ret; + unsigned int write_index = dest_ring->write_index; + unsigned int sw_index = dest_ring->sw_index; + struct ce_desc_64 *base = dest_ring->base_addr_owner_space; + struct ce_desc_64 *desc = + CE_DEST_RING_TO_DESC_64(base, write_index); + u32 ctrl_addr = pipe->ctrl_addr; - spin_lock_bh(&ar_pci->ce_lock); - write_index = dest_ring->write_index; - sw_index = dest_ring->sw_index; + lockdep_assert_held(&ce->ce_lock); - ath10k_pci_wake(ar); + if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) + return -ENOSPC; - if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) { - struct ce_desc *base = dest_ring->base_addr_owner_space; - struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); + desc->addr = __cpu_to_le64(paddr); + desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK); - /* Update destination descriptor */ - desc->addr = __cpu_to_le32(buffer); - desc->nbytes = 0; + desc->nbytes = 0; - dest_ring->per_transfer_context[write_index] = - per_recv_context; + dest_ring->per_transfer_context[write_index] = ctx; + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); + dest_ring->write_index = write_index; - /* Update Destination Ring Write Index */ - write_index = CE_RING_IDX_INCR(nentries_mask, write_index); - ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); - dest_ring->write_index = write_index; - ret = 0; - } else { - ret = -EIO; - } - ath10k_pci_sleep(ar); - spin_unlock_bh(&ar_pci->ce_lock); + return 0; +} + +void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) +{ + struct ath10k *ar = pipe->ar; + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index = dest_ring->write_index; + u32 ctrl_addr = pipe->ctrl_addr; + u32 
cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); + + /* Prevent CE ring stuck issue that will occur when ring is full. + * Make sure that write index is 1 less than read index. + */ + if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index) + nentries -= 1; + + write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries); + ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); + dest_ring->write_index = write_index; +} +EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx); + +int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr) +{ + struct ath10k *ar = pipe->ar; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + int ret; + + spin_lock_bh(&ce->ce_lock); + ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr); + spin_unlock_bh(&ce->ce_lock); return ret; } +EXPORT_SYMBOL(ath10k_ce_rx_post_buf); /* * Guts of ath10k_ce_completed_recv_next. * The caller takes responsibility for any necessary locking. */ -static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state, - void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp) -{ - struct ce_ring_state *dest_ring = ce_state->dest_ring; +static int + _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + unsigned int *nbytesp) +{ + struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; unsigned int sw_index = dest_ring->sw_index; @@ -512,21 +784,66 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state, desc->nbytes = 0; /* Return data from completed destination descriptor */ - *bufferp = __le32_to_cpu(sdesc.addr); *nbytesp = nbytes; - *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA); - if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP) - *flagsp = CE_RECV_FLAG_SWAPPED; - else - *flagsp = 0; + if (per_transfer_contextp) + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + + /* Copy engine 5 (HTT Rx) will reuse the same transfer context. + * So update transfer context all CEs except CE5. + */ + if (ce_state->id != 5) + dest_ring->per_transfer_context[sw_index] = NULL; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + + return 0; +} + +static int +_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + unsigned int *nbytesp) +{ + struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int sw_index = dest_ring->sw_index; + struct ce_desc_64 *base = dest_ring->base_addr_owner_space; + struct ce_desc_64 *desc = + CE_DEST_RING_TO_DESC_64(base, sw_index); + struct ce_desc_64 sdesc; + u16 nbytes; + + /* Copy in one go for performance reasons */ + sdesc = *desc; + + nbytes = __le16_to_cpu(sdesc.nbytes); + if (nbytes == 0) { + /* This closes a relatively unusual race where the Host + * sees the updated DRRI before the update to the + * corresponding descriptor has completed. We treat this + * as a descriptor that is not yet done. 
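+ *
+ * Editor's note: nbytes doubles as the completion marker here. The
+ * host writes nbytes = 0 when it posts a receive buffer (see
+ * __ath10k_ce_rx_post_buf_64() above) and again after consuming a
+ * completion, while the target writes the non-zero copied length
+ * when it finishes; a zero read therefore always means "not done
+ * yet", and the caller simply retries on the next interrupt or
+ * poll.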
+ */ + return -EIO; + } + + desc->nbytes = 0; + + /* Return data from completed destination descriptor */ + *nbytesp = nbytes; if (per_transfer_contextp) *per_transfer_contextp = dest_ring->per_transfer_context[sw_index]; - /* sanity */ - dest_ring->per_transfer_context[sw_index] = NULL; + /* Copy engine 5 (HTT Rx) will reuse the same transfer context. + * So update transfer context all CEs except CE5. + */ + if (ce_state->id != 5) + dest_ring->per_transfer_context[sw_index] = NULL; /* Update sw_index */ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); @@ -535,38 +852,46 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state, return 0; } -int ath10k_ce_completed_recv_next(struct ce_state *ce_state, +int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_ctx, + unsigned int *nbytesp) +{ + return ce_state->ops->ce_completed_recv_next_nolock(ce_state, + per_transfer_ctx, + nbytesp); +} +EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock); + +int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp) + unsigned int *nbytesp) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); - ret = ath10k_ce_completed_recv_next_nolock(ce_state, + spin_lock_bh(&ce->ce_lock); + ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state, per_transfer_contextp, - bufferp, nbytesp, - transfer_idp, flagsp); - spin_unlock_bh(&ar_pci->ce_lock); + nbytesp); + + spin_unlock_bh(&ce->ce_lock); return ret; } +EXPORT_SYMBOL(ath10k_ce_completed_recv_next); -int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, - void **per_transfer_contextp, - u32 *bufferp) +static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + dma_addr_t *bufferp) { - struct ce_ring_state *dest_ring; + struct ath10k_ce_ring *dest_ring; unsigned int nentries_mask; unsigned int sw_index; unsigned int write_index; int ret; struct ath10k *ar; - struct ath10k_pci *ar_pci; + struct ath10k_ce *ce; dest_ring = ce_state->dest_ring; @@ -574,9 +899,9 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, return -EIO; ar = ce_state->ar; - ar_pci = ath10k_pci_priv(ar); + ce = ath10k_ce_priv(ar); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); nentries_mask = dest_ring->nentries_mask; sw_index = dest_ring->sw_index; @@ -594,6 +919,61 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, /* sanity */ dest_ring->per_transfer_context[sw_index] = NULL; + desc->nbytes = 0; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + ret = 0; + } else { + ret = -EIO; + } + + spin_unlock_bh(&ce->ce_lock); + + return ret; +} + +static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + dma_addr_t *bufferp) +{ + struct ath10k_ce_ring *dest_ring; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; + int ret; + struct ath10k *ar; + struct ath10k_ce *ce; + + dest_ring = ce_state->dest_ring; + + if (!dest_ring) + return -EIO; + + ar = ce_state->ar; + ce = ath10k_ce_priv(ar); + + spin_lock_bh(&ce->ce_lock); + + nentries_mask = dest_ring->nentries_mask; + sw_index = dest_ring->sw_index; + write_index = dest_ring->write_index; + if (write_index != 
sw_index) { + struct ce_desc_64 *base = dest_ring->base_addr_owner_space; + struct ce_desc_64 *desc = + CE_DEST_RING_TO_DESC_64(base, sw_index); + + /* Return data from completed destination descriptor */ + *bufferp = __le64_to_cpu(desc->addr); + + if (per_transfer_contextp) + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + + /* sanity */ + dest_ring->per_transfer_context[sw_index] = NULL; + desc->nbytes = 0; /* Update sw_index */ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); @@ -603,28 +983,35 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, ret = -EIO; } - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } +int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + dma_addr_t *bufferp) +{ + return ce_state->ops->ce_revoke_recv_next(ce_state, + per_transfer_contextp, + bufferp); +} +EXPORT_SYMBOL(ath10k_ce_revoke_recv_next); + /* * Guts of ath10k_ce_completed_send_next. * The caller takes responsibility for any necessary locking. */ -static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state, - void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp) +static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp) { - struct ce_ring_state *src_ring = ce_state->src_ring; + struct ath10k_ce_ring *src_ring = ce_state->src_ring; u32 ctrl_addr = ce_state->ctrl_addr; struct ath10k *ar = ce_state->ar; unsigned int nentries_mask = src_ring->nentries_mask; unsigned int sw_index = src_ring->sw_index; unsigned int read_index; - int ret = -EIO; + struct ce_desc *desc; if (src_ring->hw_index == sw_index) { /* @@ -634,53 +1021,150 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state, * the SW has really caught up to the HW, or if the cached * value of the HW index has become stale. 
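 *
 * (Editor's note: hw_index is a cached copy of the hardware's
 * source ring read index. Fetching the live value costs a register
 * read across the bus -- or a DDR snapshot when
 * hw_params.rri_on_ddr is set -- so the cache is refreshed only
 * once the software index appears to have caught up with it.)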
*/ - ath10k_pci_wake(ar); - src_ring->hw_index = - ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); - ath10k_pci_sleep(ar); + + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + if (read_index == 0xffffffff) + return -ENODEV; + + read_index &= nentries_mask; + src_ring->hw_index = read_index; } - read_index = src_ring->hw_index; - if ((read_index != sw_index) && (read_index != 0xffffffff)) { - struct ce_desc *sbase = src_ring->shadow_base; - struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index); + if (ar->hw_params.rri_on_ddr) + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + else + read_index = src_ring->hw_index; - /* Return data from completed source descriptor */ - *bufferp = __le32_to_cpu(sdesc->addr); - *nbytesp = __le16_to_cpu(sdesc->nbytes); - *transfer_idp = MS(__le16_to_cpu(sdesc->flags), - CE_DESC_FLAGS_META_DATA); + if (read_index == sw_index) + return -EIO; - if (per_transfer_contextp) - *per_transfer_contextp = - src_ring->per_transfer_context[sw_index]; + if (per_transfer_contextp) + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; - /* sanity */ - src_ring->per_transfer_context[sw_index] = NULL; + /* sanity */ + src_ring->per_transfer_context[sw_index] = NULL; + desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space, + sw_index); + desc->nbytes = 0; - /* Update sw_index */ - sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); - src_ring->sw_index = sw_index; - ret = 0; + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + + return 0; +} + +static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp) +{ + struct ath10k_ce_ring *src_ring = ce_state->src_ring; + u32 ctrl_addr = ce_state->ctrl_addr; + struct ath10k *ar = ce_state->ar; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index = src_ring->sw_index; + unsigned int read_index; + struct ce_desc_64 *desc; + + if (src_ring->hw_index == sw_index) { + /* + * The SW completion index has caught up with the cached + * version of the HW completion index. + * Update the cached HW completion index to see whether + * the SW has really caught up to the HW, or if the cached + * value of the HW index has become stale. 
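+ *
+ * Editor's note: the 0xffffffff check below treats an all-ones
+ * read as a vanished device (-ENODEV). A detached or powered-down
+ * bus typically returns all-ones for reads, and masking such a
+ * value with nentries_mask would otherwise produce a plausible
+ * looking ring index.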
+ */ + + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + if (read_index == 0xffffffff) + return -ENODEV; + + read_index &= nentries_mask; + src_ring->hw_index = read_index; } - return ret; + if (ar->hw_params.rri_on_ddr) + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + else + read_index = src_ring->hw_index; + + if (read_index == sw_index) + return -EIO; + + if (per_transfer_contextp) + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + + /* sanity */ + src_ring->per_transfer_context[sw_index] = NULL; + desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space, + sw_index); + desc->nbytes = 0; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + + return 0; +} + +int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp) +{ + return ce_state->ops->ce_completed_send_next_nolock(ce_state, + per_transfer_contextp); +} +EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock); + +static void ath10k_ce_extract_desc_data(struct ath10k *ar, + struct ath10k_ce_ring *src_ring, + u32 sw_index, + dma_addr_t *bufferp, + u32 *nbytesp, + u32 *transfer_idp) +{ + struct ce_desc *base = src_ring->base_addr_owner_space; + struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index); + + /* Return data from completed source descriptor */ + *bufferp = __le32_to_cpu(desc->addr); + *nbytesp = __le16_to_cpu(desc->nbytes); + *transfer_idp = MS(__le16_to_cpu(desc->flags), + CE_DESC_FLAGS_META_DATA); +} + +static void ath10k_ce_extract_desc_data_64(struct ath10k *ar, + struct ath10k_ce_ring *src_ring, + u32 sw_index, + dma_addr_t *bufferp, + u32 *nbytesp, + u32 *transfer_idp) +{ + struct ce_desc_64 *base = src_ring->base_addr_owner_space; + struct ce_desc_64 *desc = + CE_SRC_RING_TO_DESC_64(base, sw_index); + + /* Return data from completed source descriptor */ + *bufferp = __le64_to_cpu(desc->addr); + *nbytesp = __le16_to_cpu(desc->nbytes); + *transfer_idp = MS(__le16_to_cpu(desc->flags), + CE_DESC_FLAGS_META_DATA); } /* NB: Modeled after ath10k_ce_completed_send_next */ -int ath10k_ce_cancel_send_next(struct ce_state *ce_state, +int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, + dma_addr_t *bufferp, unsigned int *nbytesp, unsigned int *transfer_idp) { - struct ce_ring_state *src_ring; + struct ath10k_ce_ring *src_ring; unsigned int nentries_mask; unsigned int sw_index; unsigned int write_index; int ret; struct ath10k *ar; - struct ath10k_pci *ar_pci; + struct ath10k_ce *ce; src_ring = ce_state->src_ring; @@ -688,23 +1172,18 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state, return -EIO; ar = ce_state->ar; - ar_pci = ath10k_pci_priv(ar); + ce = ath10k_ce_priv(ar); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; write_index = src_ring->write_index; if (write_index != sw_index) { - struct ce_desc *base = src_ring->base_addr_owner_space; - struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index); - - /* Return data from completed source descriptor */ - *bufferp = __le32_to_cpu(desc->addr); - *nbytesp = __le16_to_cpu(desc->nbytes); - *transfer_idp = MS(__le16_to_cpu(desc->flags), - CE_DESC_FLAGS_META_DATA); + ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index, + bufferp, nbytesp, + transfer_idp); if (per_transfer_contextp) *per_transfer_contextp = @@ -721,30 +1200,27 @@ int 
ath10k_ce_cancel_send_next(struct ce_state *ce_state, ret = -EIO; } - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } +EXPORT_SYMBOL(ath10k_ce_cancel_send_next); -int ath10k_ce_completed_send_next(struct ce_state *ce_state, - void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp) +int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = ath10k_ce_completed_send_next_nolock(ce_state, - per_transfer_contextp, - bufferp, nbytesp, - transfer_idp); - spin_unlock_bh(&ar_pci->ce_lock); + per_transfer_contextp); + spin_unlock_bh(&ce->ce_lock); return ret; } +EXPORT_SYMBOL(ath10k_ce_completed_send_next); /* * Guts of interrupt handler for per-engine interrupts on a particular CE. @@ -754,81 +1230,46 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state, */ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id]; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; u32 ctrl_addr = ce_state->ctrl_addr; - void *transfer_context; - u32 buf; - unsigned int nbytes; - unsigned int id; - unsigned int flags; - - ath10k_pci_wake(ar); - spin_lock_bh(&ar_pci->ce_lock); - - /* Clear the copy-complete interrupts that will be handled here. */ - ath10k_ce_engine_int_status_clear(ar, ctrl_addr, - HOST_IS_COPY_COMPLETE_MASK); - - if (ce_state->recv_cb) { - /* - * Pop completed recv buffers and call the registered - * recv callback for each - */ - while (ath10k_ce_completed_recv_next_nolock(ce_state, - &transfer_context, - &buf, &nbytes, - &id, &flags) == 0) { - spin_unlock_bh(&ar_pci->ce_lock); - ce_state->recv_cb(ce_state, transfer_context, buf, - nbytes, id, flags); - spin_lock_bh(&ar_pci->ce_lock); - } - } - - if (ce_state->send_cb) { - /* - * Pop completed send buffers and call the registered - * send callback for each - */ - while (ath10k_ce_completed_send_next_nolock(ce_state, - &transfer_context, - &buf, - &nbytes, - &id) == 0) { - spin_unlock_bh(&ar_pci->ce_lock); - ce_state->send_cb(ce_state, transfer_context, - buf, nbytes, id); - spin_lock_bh(&ar_pci->ce_lock); - } - } /* + * Clear before handling + * * Misc CE interrupts are not being handled, but still need * to be cleared. + * + * NOTE: When the last copy engine interrupt is cleared the + * hardware will go to sleep. Once this happens any access to + * the CE registers can cause a hardware fault. */ - ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK); + ath10k_ce_engine_int_status_clear(ar, ctrl_addr, + wm_regs->cc_mask | wm_regs->wm_mask); - spin_unlock_bh(&ar_pci->ce_lock); - ath10k_pci_sleep(ar); + if (ce_state->recv_cb) + ce_state->recv_cb(ce_state); + + if (ce_state->send_cb) + ce_state->send_cb(ce_state); } +EXPORT_SYMBOL(ath10k_ce_per_engine_service); /* * Handler for per-engine interrupts on ALL active CEs. 
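 * (Editor's note: an illustrative sketch of the kind of shared-IRQ
 * handler that would call this; the handler name and wiring are
 * hypothetical, not taken from this driver:
 *
 *   static irqreturn_t wlan_shared_isr(int irq, void *data)
 *   {
 *           struct ath10k *ar = data;
 *
 *           ath10k_ce_per_engine_service_any(ar);
 *           return IRQ_HANDLED;
 *   }
 *
 * The summary register below is read once, and only the engines
 * whose bit is set get serviced.)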
* This is used in cases where the system is sharing a - * single interrput for all CEs + * single interrupt for all CEs */ void ath10k_ce_per_engine_service_any(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ce_id; u32 intr_summary; - ath10k_pci_wake(ar); - intr_summary = CE_INTERRUPT_SUMMARY(ar); + intr_summary = ath10k_ce_interrupt_summary(ar); - for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) { + for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) { if (intr_summary & (1 << ce_id)) intr_summary &= ~(1 << ce_id); else @@ -837,9 +1278,8 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) ath10k_ce_per_engine_service(ar, ce_id); } - - ath10k_pci_sleep(ar); } +EXPORT_SYMBOL(ath10k_ce_per_engine_service_any); /* * Adjust interrupts for the copy complete handler. @@ -848,13 +1288,11 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) * * Called with ce_lock held. */ -static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state, - int disable_copy_compl_intr) +static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state) { u32 ctrl_addr = ce_state->ctrl_addr; struct ath10k *ar = ce_state->ar; - - ath10k_pci_wake(ar); + bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR; if ((!disable_copy_compl_intr) && (ce_state->send_cb || ce_state->recv_cb)) @@ -863,251 +1301,358 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state, ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); ath10k_ce_watermark_intr_disable(ar, ctrl_addr); +} - ath10k_pci_sleep(ar); +void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state; + u32 ctrl_addr; + + ce_state = &ce->ce_states[ce_id]; + if (ce_state->attr_flags & CE_ATTR_POLL) + return; + + ctrl_addr = ath10k_ce_base_address(ar, ce_id); + + ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); + ath10k_ce_error_intr_disable(ar, ctrl_addr); + ath10k_ce_watermark_intr_disable(ar, ctrl_addr); } +EXPORT_SYMBOL(ath10k_ce_disable_interrupt); void ath10k_ce_disable_interrupts(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ce_id; - ath10k_pci_wake(ar); - for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) { - struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id]; - u32 ctrl_addr = ce_state->ctrl_addr; - - ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); - } - ath10k_pci_sleep(ar); + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) + ath10k_ce_disable_interrupt(ar, ce_id); } +EXPORT_SYMBOL(ath10k_ce_disable_interrupts); -void ath10k_ce_send_cb_register(struct ce_state *ce_state, - void (*send_cb) (struct ce_state *ce_state, - void *transfer_context, - u32 buffer, - unsigned int nbytes, - unsigned int transfer_id), - int disable_interrupts) +void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id) { - struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state; + + ce_state = &ce->ce_states[ce_id]; + if (ce_state->attr_flags & CE_ATTR_POLL) + return; - spin_lock_bh(&ar_pci->ce_lock); - ce_state->send_cb = send_cb; - ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts); - spin_unlock_bh(&ar_pci->ce_lock); + ath10k_ce_per_engine_handler_adjust(ce_state); } +EXPORT_SYMBOL(ath10k_ce_enable_interrupt); -void ath10k_ce_recv_cb_register(struct ce_state *ce_state, - void (*recv_cb) (struct ce_state 
*ce_state, - void *transfer_context, - u32 buffer, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags)) +void ath10k_ce_enable_interrupts(struct ath10k *ar) { - struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ce_id; - spin_lock_bh(&ar_pci->ce_lock); - ce_state->recv_cb = recv_cb; - ath10k_ce_per_engine_handler_adjust(ce_state, 0); - spin_unlock_bh(&ar_pci->ce_lock); + /* Enable interrupts for copy engine that + * are not using polling mode. + */ + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) + ath10k_ce_enable_interrupt(ar, ce_id); } +EXPORT_SYMBOL(ath10k_ce_enable_interrupts); static int ath10k_ce_init_src_ring(struct ath10k *ar, unsigned int ce_id, - struct ce_state *ce_state, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_ring_state *src_ring; - unsigned int nentries = attr->src_nentries; - unsigned int ce_nbytes; - u32 ctrl_addr = ath10k_ce_base_address(ce_id); - dma_addr_t base_addr; - char *ptr; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + struct ath10k_ce_ring *src_ring = ce_state->src_ring; + u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); - nentries = roundup_pow_of_two(nentries); + nentries = roundup_pow_of_two(attr->src_nentries); - if (ce_state->src_ring) { - WARN_ON(ce_state->src_ring->nentries != nentries); - return 0; - } - - ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *)); - ptr = kzalloc(ce_nbytes, GFP_KERNEL); - if (ptr == NULL) - return -ENOMEM; - - ce_state->src_ring = (struct ce_ring_state *)ptr; - src_ring = ce_state->src_ring; - - ptr += sizeof(struct ce_ring_state); - src_ring->nentries = nentries; - src_ring->nentries_mask = nentries - 1; + if (ar->hw_params.target_64bit) + memset(src_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc_64)); + else + memset(src_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc)); - ath10k_pci_wake(ar); src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + src_ring->sw_index &= src_ring->nentries_mask; src_ring->hw_index = src_ring->sw_index; src_ring->write_index = ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); - ath10k_pci_sleep(ar); - - src_ring->per_transfer_context = (void **)ptr; - - /* - * Legacy platforms that do not support cache - * coherent DMA are unsupported - */ - src_ring->base_addr_owner_space_unaligned = - pci_alloc_consistent(ar_pci->pdev, - (nentries * sizeof(struct ce_desc) + - CE_DESC_RING_ALIGN), - &base_addr); - src_ring->base_addr_ce_space_unaligned = base_addr; + src_ring->write_index &= src_ring->nentries_mask; - src_ring->base_addr_owner_space = PTR_ALIGN( - src_ring->base_addr_owner_space_unaligned, - CE_DESC_RING_ALIGN); - src_ring->base_addr_ce_space = ALIGN( - src_ring->base_addr_ce_space_unaligned, - CE_DESC_RING_ALIGN); - - /* - * Also allocate a shadow src ring in regular - * mem to use for faster access. 
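 *
 * (Editor's note: this cacheable shadow copy of the source ring --
 * previously used so completed descriptors could be read without
 * touching uncached DMA memory -- is deleted by this patch; the
 * completion path now reads the coherent ring directly. The
 * shadow_base that survives is allocated only for targets with
 * hw_params.shadow_reg_support, see ath10k_ce_alloc_shadow_base().)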
- */ - src_ring->shadow_base_unaligned = - kmalloc((nentries * sizeof(struct ce_desc) + - CE_DESC_RING_ALIGN), GFP_KERNEL); - - src_ring->shadow_base = PTR_ALIGN( - src_ring->shadow_base_unaligned, - CE_DESC_RING_ALIGN); - - ath10k_pci_wake(ar); - ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, + ath10k_ce_src_ring_base_addr_set(ar, ce_id, src_ring->base_addr_ce_space); ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); - ath10k_pci_sleep(ar); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot init ce src ring id %d entries %d base_addr %p\n", + ce_id, nentries, src_ring->base_addr_owner_space); return 0; } static int ath10k_ce_init_dest_ring(struct ath10k *ar, unsigned int ce_id, - struct ce_state *ce_state, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_ring_state *dest_ring; - unsigned int nentries = attr->dest_nentries; - unsigned int ce_nbytes; - u32 ctrl_addr = ath10k_ce_base_address(ce_id); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; + u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); + + nentries = roundup_pow_of_two(attr->dest_nentries); + + if (ar->hw_params.target_64bit) + memset(dest_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc_64)); + else + memset(dest_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc)); + + dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); + dest_ring->sw_index &= dest_ring->nentries_mask; + dest_ring->write_index = + ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); + dest_ring->write_index &= dest_ring->nentries_mask; + + ath10k_ce_dest_ring_base_addr_set(ar, ce_id, + dest_ring->base_addr_ce_space); + ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries); + ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); + ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0); + ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot ce dest ring id %d entries %d base_addr %p\n", + ce_id, nentries, dest_ring->base_addr_owner_space); + + return 0; +} + +static int ath10k_ce_alloc_shadow_base(struct ath10k *ar, + struct ath10k_ce_ring *src_ring, + u32 nentries) +{ + src_ring->shadow_base_unaligned = kcalloc(nentries, + sizeof(struct ce_desc_64), + GFP_KERNEL); + if (!src_ring->shadow_base_unaligned) + return -ENOMEM; + + src_ring->shadow_base = (struct ce_desc_64 *) + PTR_ALIGN(src_ring->shadow_base_unaligned, + CE_DESC_RING_ALIGN); + return 0; +} + +static struct ath10k_ce_ring * +ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, + const struct ce_attr *attr) +{ + struct ath10k_ce_ring *src_ring; + u32 nentries = attr->src_nentries; dma_addr_t base_addr; - char *ptr; + int ret; nentries = roundup_pow_of_two(nentries); - if (ce_state->dest_ring) { - WARN_ON(ce_state->dest_ring->nentries != nentries); - return 0; + src_ring = kzalloc(struct_size(src_ring, per_transfer_context, + nentries), GFP_KERNEL); + if (src_ring == NULL) + return ERR_PTR(-ENOMEM); + + src_ring->nentries = nentries; + src_ring->nentries_mask = nentries - 1; + + /* + * Legacy platforms that do not support cache + * coherent DMA are unsupported + */ + src_ring->base_addr_owner_space_unaligned = + 
dma_alloc_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + &base_addr, GFP_KERNEL); + if (!src_ring->base_addr_owner_space_unaligned) { + kfree(src_ring); + return ERR_PTR(-ENOMEM); } - ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *)); - ptr = kzalloc(ce_nbytes, GFP_KERNEL); - if (ptr == NULL) - return -ENOMEM; + src_ring->base_addr_ce_space_unaligned = base_addr; - ce_state->dest_ring = (struct ce_ring_state *)ptr; - dest_ring = ce_state->dest_ring; + src_ring->base_addr_owner_space = + PTR_ALIGN(src_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + src_ring->base_addr_ce_space = + ALIGN(src_ring->base_addr_ce_space_unaligned, + CE_DESC_RING_ALIGN); + + if (ar->hw_params.shadow_reg_support) { + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); + if (ret) { + dma_free_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + src_ring->base_addr_owner_space_unaligned, + base_addr); + kfree(src_ring); + return ERR_PTR(ret); + } + } - ptr += sizeof(struct ce_ring_state); - dest_ring->nentries = nentries; - dest_ring->nentries_mask = nentries - 1; + return src_ring; +} - ath10k_pci_wake(ar); - dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); - dest_ring->write_index = - ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); - ath10k_pci_sleep(ar); +static struct ath10k_ce_ring * +ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, + const struct ce_attr *attr) +{ + struct ath10k_ce_ring *src_ring; + u32 nentries = attr->src_nentries; + dma_addr_t base_addr; + int ret; + + nentries = roundup_pow_of_two(nentries); + + src_ring = kzalloc(struct_size(src_ring, per_transfer_context, + nentries), GFP_KERNEL); + if (!src_ring) + return ERR_PTR(-ENOMEM); + + src_ring->nentries = nentries; + src_ring->nentries_mask = nentries - 1; + + /* Legacy platforms that do not support cache + * coherent DMA are unsupported + */ + src_ring->base_addr_owner_space_unaligned = + dma_alloc_coherent(ar->dev, + (nentries * sizeof(struct ce_desc_64) + + CE_DESC_RING_ALIGN), + &base_addr, GFP_KERNEL); + if (!src_ring->base_addr_owner_space_unaligned) { + kfree(src_ring); + return ERR_PTR(-ENOMEM); + } + + src_ring->base_addr_ce_space_unaligned = base_addr; - dest_ring->per_transfer_context = (void **)ptr; + src_ring->base_addr_owner_space = + PTR_ALIGN(src_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + src_ring->base_addr_ce_space = + ALIGN(src_ring->base_addr_ce_space_unaligned, + CE_DESC_RING_ALIGN); + + if (ar->hw_params.shadow_reg_support) { + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); + if (ret) { + dma_free_coherent(ar->dev, + (nentries * sizeof(struct ce_desc_64) + + CE_DESC_RING_ALIGN), + src_ring->base_addr_owner_space_unaligned, + base_addr); + kfree(src_ring); + return ERR_PTR(ret); + } + } + + return src_ring; +} + +static struct ath10k_ce_ring * +ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, + const struct ce_attr *attr) +{ + struct ath10k_ce_ring *dest_ring; + u32 nentries; + dma_addr_t base_addr; + + nentries = roundup_pow_of_two(attr->dest_nentries); + + dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context, + nentries), GFP_KERNEL); + if (dest_ring == NULL) + return ERR_PTR(-ENOMEM); + + dest_ring->nentries = nentries; + dest_ring->nentries_mask = nentries - 1; /* * Legacy platforms that do not support cache * coherent DMA are unsupported */ dest_ring->base_addr_owner_space_unaligned = - 
pci_alloc_consistent(ar_pci->pdev, - (nentries * sizeof(struct ce_desc) + - CE_DESC_RING_ALIGN), - &base_addr); + dma_alloc_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + &base_addr, GFP_KERNEL); + if (!dest_ring->base_addr_owner_space_unaligned) { + kfree(dest_ring); + return ERR_PTR(-ENOMEM); + } + dest_ring->base_addr_ce_space_unaligned = base_addr; - /* - * Correctly initialize memory to 0 to prevent garbage - * data crashing system when download firmware - */ - memset(dest_ring->base_addr_owner_space_unaligned, 0, - nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN); - - dest_ring->base_addr_owner_space = PTR_ALIGN( - dest_ring->base_addr_owner_space_unaligned, - CE_DESC_RING_ALIGN); - dest_ring->base_addr_ce_space = ALIGN( - dest_ring->base_addr_ce_space_unaligned, - CE_DESC_RING_ALIGN); - - ath10k_pci_wake(ar); - ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, - dest_ring->base_addr_ce_space); - ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries); - ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); - ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0); - ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); - ath10k_pci_sleep(ar); + dest_ring->base_addr_owner_space = + PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + dest_ring->base_addr_ce_space = + ALIGN(dest_ring->base_addr_ce_space_unaligned, + CE_DESC_RING_ALIGN); - return 0; + return dest_ring; } -static struct ce_state *ath10k_ce_init_state(struct ath10k *ar, - unsigned int ce_id, - const struct ce_attr *attr) +static struct ath10k_ce_ring * +ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id, + const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_state *ce_state = NULL; - u32 ctrl_addr = ath10k_ce_base_address(ce_id); + struct ath10k_ce_ring *dest_ring; + u32 nentries; + dma_addr_t base_addr; - spin_lock_bh(&ar_pci->ce_lock); + nentries = roundup_pow_of_two(attr->dest_nentries); - if (!ar_pci->ce_id_to_state[ce_id]) { - ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC); - if (ce_state == NULL) { - spin_unlock_bh(&ar_pci->ce_lock); - return NULL; - } + dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context, + nentries), GFP_KERNEL); + if (!dest_ring) + return ERR_PTR(-ENOMEM); - ar_pci->ce_id_to_state[ce_id] = ce_state; - ce_state->ar = ar; - ce_state->id = ce_id; - ce_state->ctrl_addr = ctrl_addr; - ce_state->state = CE_RUNNING; - /* Save attribute flags */ - ce_state->attr_flags = attr->flags; - ce_state->src_sz_max = attr->src_sz_max; + dest_ring->nentries = nentries; + dest_ring->nentries_mask = nentries - 1; + + /* Legacy platforms that do not support cache + * coherent DMA are unsupported + */ + dest_ring->base_addr_owner_space_unaligned = + dma_alloc_coherent(ar->dev, + (nentries * sizeof(struct ce_desc_64) + + CE_DESC_RING_ALIGN), + &base_addr, GFP_KERNEL); + if (!dest_ring->base_addr_owner_space_unaligned) { + kfree(dest_ring); + return ERR_PTR(-ENOMEM); } - spin_unlock_bh(&ar_pci->ce_lock); + dest_ring->base_addr_ce_space_unaligned = base_addr; - return ce_state; + /* Correctly initialize memory to 0 to prevent garbage + * data crashing system when download firmware + */ + dest_ring->base_addr_owner_space = + PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + dest_ring->base_addr_ce_space = + ALIGN(dest_ring->base_addr_ce_space_unaligned, + CE_DESC_RING_ALIGN); + + return dest_ring; } /* @@ -1117,73 +1662,310 @@ static struct ce_state 
*ath10k_ce_init_state(struct ath10k *ar, * initialization. It may be that only one side or the other is * initialized by software/firmware. */ -struct ce_state *ath10k_ce_init(struct ath10k *ar, - unsigned int ce_id, - const struct ce_attr *attr) +int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, + const struct ce_attr *attr) { - struct ce_state *ce_state; - u32 ctrl_addr = ath10k_ce_base_address(ce_id); - - ce_state = ath10k_ce_init_state(ar, ce_id, attr); - if (!ce_state) { - ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id); - return NULL; - } + int ret; if (attr->src_nentries) { - if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) { - ath10k_err("Failed to initialize CE src ring for ID: %d\n", - ce_id); - ath10k_ce_deinit(ce_state); - return NULL; + ret = ath10k_ce_init_src_ring(ar, ce_id, attr); + if (ret) { + ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n", + ce_id, ret); + return ret; } } if (attr->dest_nentries) { - if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) { - ath10k_err("Failed to initialize CE dest ring for ID: %d\n", - ce_id); - ath10k_ce_deinit(ce_state); - return NULL; + ret = ath10k_ce_init_dest_ring(ar, ce_id, attr); + if (ret) { + ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n", + ce_id, ret); + return ret; } } - /* Enable CE error interrupts */ - ath10k_pci_wake(ar); - ath10k_ce_error_intr_enable(ar, ctrl_addr); - ath10k_pci_sleep(ar); + return 0; +} +EXPORT_SYMBOL(ath10k_ce_init_pipe); - return ce_state; +static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) +{ + u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); + + ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0); + ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); + ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0); + ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0); } -void ath10k_ce_deinit(struct ce_state *ce_state) +static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) { - unsigned int ce_id = ce_state->id; - struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); - ce_state->state = CE_UNUSED; - ar_pci->ce_id_to_state[ce_id] = NULL; + ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0); + ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); + ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0); +} + +void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) +{ + ath10k_ce_deinit_src_ring(ar, ce_id); + ath10k_ce_deinit_dest_ring(ar, ce_id); +} +EXPORT_SYMBOL(ath10k_ce_deinit_pipe); + +static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; if (ce_state->src_ring) { - kfree(ce_state->src_ring->shadow_base_unaligned); - pci_free_consistent(ar_pci->pdev, - (ce_state->src_ring->nentries * - sizeof(struct ce_desc) + - CE_DESC_RING_ALIGN), - ce_state->src_ring->base_addr_owner_space, - ce_state->src_ring->base_addr_ce_space); + if (ar->hw_params.shadow_reg_support) + kfree(ce_state->src_ring->shadow_base_unaligned); + dma_free_coherent(ar->dev, + (ce_state->src_ring->nentries * + sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + ce_state->src_ring->base_addr_owner_space, + ce_state->src_ring->base_addr_ce_space); kfree(ce_state->src_ring); } if (ce_state->dest_ring) { - pci_free_consistent(ar_pci->pdev, - (ce_state->dest_ring->nentries * - sizeof(struct ce_desc) + - CE_DESC_RING_ALIGN), - 
ce_state->dest_ring->base_addr_owner_space, - ce_state->dest_ring->base_addr_ce_space); + dma_free_coherent(ar->dev, + (ce_state->dest_ring->nentries * + sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + ce_state->dest_ring->base_addr_owner_space, + ce_state->dest_ring->base_addr_ce_space); kfree(ce_state->dest_ring); } - kfree(ce_state); + + ce_state->src_ring = NULL; + ce_state->dest_ring = NULL; +} + +static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + + if (ce_state->src_ring) { + if (ar->hw_params.shadow_reg_support) + kfree(ce_state->src_ring->shadow_base_unaligned); + dma_free_coherent(ar->dev, + (ce_state->src_ring->nentries * + sizeof(struct ce_desc_64) + + CE_DESC_RING_ALIGN), + ce_state->src_ring->base_addr_owner_space, + ce_state->src_ring->base_addr_ce_space); + kfree(ce_state->src_ring); + } + + if (ce_state->dest_ring) { + dma_free_coherent(ar->dev, + (ce_state->dest_ring->nentries * + sizeof(struct ce_desc_64) + + CE_DESC_RING_ALIGN), + ce_state->dest_ring->base_addr_owner_space, + ce_state->dest_ring->base_addr_ce_space); + kfree(ce_state->dest_ring); + } + + ce_state->src_ring = NULL; + ce_state->dest_ring = NULL; +} + +void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + + ce_state->ops->ce_free_pipe(ar, ce_id); +} +EXPORT_SYMBOL(ath10k_ce_free_pipe); + +void ath10k_ce_dump_registers(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_crash_data ce_data; + u32 addr, id; + + lockdep_assert_held(&ar->dump_mutex); + + ath10k_err(ar, "Copy Engine register dump:\n"); + + spin_lock_bh(&ce->ce_lock); + for (id = 0; id < CE_COUNT; id++) { + addr = ath10k_ce_base_address(ar, id); + ce_data.base_addr = cpu_to_le32(addr); + + ce_data.src_wr_idx = + cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr)); + ce_data.src_r_idx = + cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr)); + ce_data.dst_wr_idx = + cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr)); + ce_data.dst_r_idx = + cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr)); + + if (crash_data) + crash_data->ce_crash_data[id] = ce_data; + + ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id, + le32_to_cpu(ce_data.base_addr), + le32_to_cpu(ce_data.src_wr_idx), + le32_to_cpu(ce_data.src_r_idx), + le32_to_cpu(ce_data.dst_wr_idx), + le32_to_cpu(ce_data.dst_r_idx)); + } + + spin_unlock_bh(&ce->ce_lock); +} +EXPORT_SYMBOL(ath10k_ce_dump_registers); + +static const struct ath10k_ce_ops ce_ops = { + .ce_alloc_src_ring = ath10k_ce_alloc_src_ring, + .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring, + .ce_rx_post_buf = __ath10k_ce_rx_post_buf, + .ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock, + .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next, + .ce_extract_desc_data = ath10k_ce_extract_desc_data, + .ce_free_pipe = _ath10k_ce_free_pipe, + .ce_send_nolock = _ath10k_ce_send_nolock, + .ce_set_src_ring_base_addr_hi = NULL, + .ce_set_dest_ring_base_addr_hi = NULL, + .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock, +}; + +static const struct ath10k_ce_ops ce_64_ops = { + .ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64, + .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64, + .ce_rx_post_buf = __ath10k_ce_rx_post_buf_64, + .ce_completed_recv_next_nolock = + 
_ath10k_ce_completed_recv_next_nolock_64, + .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64, + .ce_extract_desc_data = ath10k_ce_extract_desc_data_64, + .ce_free_pipe = _ath10k_ce_free_pipe_64, + .ce_send_nolock = _ath10k_ce_send_nolock_64, + .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi, + .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi, + .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64, +}; + +static void ath10k_ce_set_ops(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state) +{ + switch (ar->hw_rev) { + case ATH10K_HW_WCN3990: + ce_state->ops = &ce_64_ops; + break; + default: + ce_state->ops = &ce_ops; + break; + } +} + +int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, + const struct ce_attr *attr) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + int ret; + + ath10k_ce_set_ops(ar, ce_state); + /* Make sure there's enough CE ringbuffer entries for HTT TX to avoid + * additional TX locking checks. + * + * For the lack of a better place do the check here. + */ + BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + + ce_state->ar = ar; + ce_state->id = ce_id; + ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id); + ce_state->attr_flags = attr->flags; + ce_state->src_sz_max = attr->src_sz_max; + + if (attr->src_nentries) + ce_state->send_cb = attr->send_cb; + + if (attr->dest_nentries) + ce_state->recv_cb = attr->recv_cb; + + if (attr->src_nentries) { + ce_state->src_ring = + ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr); + if (IS_ERR(ce_state->src_ring)) { + ret = PTR_ERR(ce_state->src_ring); + ath10k_err(ar, "failed to alloc CE src ring %d: %d\n", + ce_id, ret); + ce_state->src_ring = NULL; + return ret; + } + } + + if (attr->dest_nentries) { + ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar, + ce_id, + attr); + if (IS_ERR(ce_state->dest_ring)) { + ret = PTR_ERR(ce_state->dest_ring); + ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n", + ce_id, ret); + ce_state->dest_ring = NULL; + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(ath10k_ce_alloc_pipe); + +void ath10k_ce_alloc_rri(struct ath10k *ar) +{ + int i; + u32 value; + u32 ctrl1_regs; + u32 ce_base_addr; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + ce->vaddr_rri = dma_alloc_coherent(ar->dev, + (CE_COUNT * sizeof(u32)), + &ce->paddr_rri, GFP_KERNEL); + + if (!ce->vaddr_rri) + return; + + ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low, + lower_32_bits(ce->paddr_rri)); + ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high, + (upper_32_bits(ce->paddr_rri) & + CE_DESC_ADDR_HI_MASK)); + + for (i = 0; i < CE_COUNT; i++) { + ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr; + ce_base_addr = ath10k_ce_base_address(ar, i); + value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs); + value |= ar->hw_ce_regs->upd->mask; + ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value); + } +} +EXPORT_SYMBOL(ath10k_ce_alloc_rri); + +void ath10k_ce_free_rri(struct ath10k *ar) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)), + ce->vaddr_rri, + ce->paddr_rri); } +EXPORT_SYMBOL(ath10k_ce_free_rri); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index c17f07c026f4..27367bd64e95 100644 --- 
a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -1,18 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. */ #ifndef _CE_H_ @@ -20,14 +10,10 @@ #include "hif.h" - -/* Maximum number of Copy Engine's supported */ -#define CE_COUNT_MAX 8 -#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048 +#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192 /* Descriptor rings must be aligned to this boundary */ #define CE_DESC_RING_ALIGN 8 -#define CE_SENDLIST_ITEMS_MAX 12 #define CE_SEND_FLAG_GATHER 0x00010000 /* @@ -36,20 +22,24 @@ * how to use copy engines. */ -struct ce_state; - - -/* Copy Engine operational state */ -enum ce_op_state { - CE_UNUSED, - CE_PAUSED, - CE_RUNNING, -}; +struct ath10k_ce_pipe; #define CE_DESC_FLAGS_GATHER (1 << 0) #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) -#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC -#define CE_DESC_FLAGS_META_DATA_LSB 3 +#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31) + +#define CE_DESC_ADDR_MASK GENMASK_ULL(34, 0) +#define CE_DESC_ADDR_HI_MASK GENMASK(4, 0) + +/* Following desc flags are used in QCA99X0 */ +#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2) +#define CE_DESC_FLAGS_TGT_INT_DIS (1 << 3) + +#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask +#define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb + +#define CE_DDR_RRI_MASK GENMASK(15, 0) +#define CE_DDR_DRRI_SHIFT 16 struct ce_desc { __le32 addr; @@ -57,8 +47,17 @@ struct ce_desc { __le16 flags; /* %CE_DESC_FLAGS_ */ }; -/* Copy Engine Ring internal state */ -struct ce_ring_state { +struct ce_desc_64 { + __le64 addr; + __le16 nbytes; /* length in register map */ + __le16 flags; /* fw_metadata_high */ + __le32 toeplitz_hash_result; +}; + +#define CE_DESC_SIZE sizeof(struct ce_desc) +#define CE_DESC_SIZE_64 sizeof(struct ce_desc_64) + +struct ath10k_ce_ring { /* Number of entries in this ring; must be power of 2 */ unsigned int nentries; unsigned int nentries_mask; @@ -94,7 +93,7 @@ struct ce_ring_state { /* Host address space */ void *base_addr_owner_space_unaligned; /* CE address space */ - u32 base_addr_ce_space_unaligned; + dma_addr_t base_addr_ce_space_unaligned; /* * Actual start of descriptors. @@ -105,64 +104,54 @@ struct ce_ring_state { void *base_addr_owner_space; /* CE address space */ - u32 base_addr_ce_space; - /* - * Start of shadow copy of descriptors, within regular memory. - * Aligned to descriptor-size boundary. 
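The new CE_DESC_ADDR_MASK/CE_DESC_ADDR_HI_MASK pair above exists because WCN3990 copy-engine descriptors carry 35-bit target addresses: the low 32 bits travel in one word and only the top bits in a companion "high" field, mirroring what ath10k_ce_alloc_rri() earlier in ce.c does with the RRI base address. A standalone sketch of the split; the address value is invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define CE_DESC_ADDR_MASK	((1ULL << 35) - 1)	/* GENMASK_ULL(34, 0) */
#define CE_DESC_ADDR_HI_MASK	0x1fULL			/* GENMASK(4, 0) */

int main(void)
{
	uint64_t paddr = 0x3abcd1234ULL;	/* hypothetical wide DMA address */
	uint64_t masked = paddr & CE_DESC_ADDR_MASK;
	uint32_t lo = (uint32_t)masked;		/* low word of the descriptor */
	uint32_t hi = (uint32_t)((masked >> 32) & CE_DESC_ADDR_HI_MASK);

	printf("lo=0x%08x hi=0x%02x\n", lo, hi);	/* lo=0xabcd1234 hi=0x03 */
	return 0;
}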
- */ - void *shadow_base_unaligned; - struct ce_desc *shadow_base; + dma_addr_t base_addr_ce_space; + + char *shadow_base_unaligned; + struct ce_desc_64 *shadow_base; - void **per_transfer_context; + /* keep last */ + void *per_transfer_context[] __counted_by(nentries); }; -/* Copy Engine internal state */ -struct ce_state { +struct ath10k_ce_pipe { struct ath10k *ar; unsigned int id; unsigned int attr_flags; u32 ctrl_addr; - enum ce_op_state state; - - void (*send_cb) (struct ce_state *ce_state, - void *per_transfer_send_context, - u32 buffer, - unsigned int nbytes, - unsigned int transfer_id); - void (*recv_cb) (struct ce_state *ce_state, - void *per_transfer_recv_context, - u32 buffer, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags); + + void (*send_cb)(struct ath10k_ce_pipe *); + void (*recv_cb)(struct ath10k_ce_pipe *); unsigned int src_sz_max; - struct ce_ring_state *src_ring; - struct ce_ring_state *dest_ring; + struct ath10k_ce_ring *src_ring; + struct ath10k_ce_ring *dest_ring; + const struct ath10k_ce_ops *ops; }; -struct ce_sendlist_item { - /* e.g. buffer or desc list */ - dma_addr_t data; - union { - /* simple buffer */ - unsigned int nbytes; - /* Rx descriptor list */ - unsigned int ndesc; - } u; - /* externally-specified flags; OR-ed with internal flags */ - u32 flags; -}; +/* Copy Engine settable attributes */ +struct ce_attr; -struct ce_sendlist { - unsigned int num_items; - struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX]; +struct ath10k_bus_ops { + u32 (*read32)(struct ath10k *ar, u32 offset); + void (*write32)(struct ath10k *ar, u32 offset, u32 value); + int (*get_num_banks)(struct ath10k *ar); }; -/* Copy Engine settable attributes */ -struct ce_attr; +static inline struct ath10k_ce *ath10k_ce_priv(struct ath10k *ar) +{ + return (struct ath10k_ce *)ar->ce_priv; +} + +struct ath10k_ce { + /* protects CE info */ + spinlock_t ce_lock; + const struct ath10k_bus_ops *bus_ops; + struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; + u32 *vaddr_rri; + dma_addr_t paddr_rri; +}; /*==================Send====================*/ @@ -182,68 +171,31 @@ struct ce_attr; * * Implementation note: pushes 1 buffer to Source ring */ -int ath10k_ce_send(struct ce_state *ce_state, +int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, void *per_transfer_send_context, - u32 buffer, + dma_addr_t buffer, unsigned int nbytes, /* 14 bits */ unsigned int transfer_id, unsigned int flags); -void ath10k_ce_send_cb_register(struct ce_state *ce_state, - void (*send_cb) (struct ce_state *ce_state, - void *transfer_context, - u32 buffer, - unsigned int nbytes, - unsigned int transfer_id), - int disable_interrupts); - -/* Append a simple buffer (address/length) to a sendlist. */ -void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, - u32 buffer, - unsigned int nbytes, - /* OR-ed with internal flags */ - u32 flags); +int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, + void *per_transfer_context, + dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags); -/* - * Queue a "sendlist" of buffers to be sent using gather to a single - * anonymous destination buffer - * ce - which copy engine to use - * sendlist - list of simple buffers to send using gather - * transfer_id - arbitrary ID; reflected to destination - * Returns 0 on success; otherwise an error status. - * - * Implemenation note: Pushes multiple buffers with Gather to Source ring. 
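struct ath10k_bus_ops is the hook that lets the now bus-agnostic CE layer touch registers without knowing whether it sits on PCI, AHB or SNOC. A hedged sketch of how a bus backend might wire it up at probe time — the my_bus_* names and priv layout are invented for illustration, not taken from this patch:

static u32 my_bus_read32(struct ath10k *ar, u32 offset)
{
	/* hypothetical backend: MMIO read of a CE register over this bus */
	return ioread32(my_bus_priv(ar)->mem + offset);
}

static void my_bus_write32(struct ath10k *ar, u32 offset, u32 value)
{
	iowrite32(value, my_bus_priv(ar)->mem + offset);
}

static int my_bus_get_num_banks(struct ath10k *ar)
{
	return 1;	/* bus/chip specific */
}

static const struct ath10k_bus_ops my_bus_ops = {
	.read32		= my_bus_read32,
	.write32	= my_bus_write32,
	.get_num_banks	= my_bus_get_num_banks,
};

/* at probe time, before CE pipes are allocated: */
ath10k_ce_priv(ar)->bus_ops = &my_bus_ops;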
- */ -int ath10k_ce_sendlist_send(struct ce_state *ce_state, - void *per_transfer_send_context, - struct ce_sendlist *sendlist, - /* 14 bits */ - unsigned int transfer_id); +void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe); + +int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe); /*==================Recv=======================*/ -/* - * Make a buffer available to receive. The buffer must be at least of a - * minimal size appropriate for this copy engine (src_sz_max attribute). - * ce - which copy engine to use - * per_transfer_recv_context - context passed back to caller's recv_cb - * buffer - address of buffer in CE space - * Returns 0 on success; otherwise an error status. - * - * Implemenation note: Pushes a buffer to Dest ring. - */ -int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state, - void *per_transfer_recv_context, - u32 buffer); - -void ath10k_ce_recv_cb_register(struct ce_state *ce_state, - void (*recv_cb) (struct ce_state *ce_state, - void *transfer_context, - u32 buffer, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags)); +int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe); +int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr); +void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries); /* recv flags */ /* Data is byte-swapped */ @@ -253,28 +205,27 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state, * Supply data for the next completed unprocessed receive descriptor. * Pops buffer from Dest ring. */ -int ath10k_ce_completed_recv_next(struct ce_state *ce_state, +int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp); + unsigned int *nbytesp); /* * Supply data for the next completed unprocessed send descriptor. * Pops 1 completed send buffer from Source ring. */ -int ath10k_ce_completed_send_next(struct ce_state *ce_state, - void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp); +int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp); + +int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp); /*==================CE Engine Initialization=======================*/ -/* Initialize an instance of a CE */ -struct ce_state *ath10k_ce_init(struct ath10k *ar, - unsigned int ce_id, - const struct ce_attr *attr); +int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, + const struct ce_attr *attr); +void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id); +int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, + const struct ce_attr *attr); +void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id); /*==================CE Engine Shutdown=======================*/ /* @@ -282,49 +233,59 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar, * receive buffers. Target DMA must be stopped before using * this API. */ -int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, +int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp); + dma_addr_t *bufferp); + +int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + unsigned int *nbytesp); /* * Support clean shutdown by allowing the caller to cancel * pending sends. Target DMA must be stopped before using * this API. 
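RX buffer replenishment moved from the old recv_buf_enqueue/callback-registration pair to ath10k_ce_rx_post_buf(), whose ctx pointer is returned to the recv callback as the per-transfer context. A simplified sketch of how a transport might refill one buffer, loosely modeled on the PCI code; error handling is trimmed and the pipe->buf_sz field is an assumed name:

struct sk_buff *skb;
dma_addr_t paddr;
int ret;

skb = dev_alloc_skb(pipe->buf_sz);
if (!skb)
	return -ENOMEM;

paddr = dma_map_single(ar->dev, skb->data, pipe->buf_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(ar->dev, paddr)) {
	dev_kfree_skb_any(skb);
	return -EIO;
}

/* the skb doubles as the per-transfer context handed back on completion */
ret = ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
if (ret) {
	dma_unmap_single(ar->dev, paddr, pipe->buf_sz, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}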
*/ -int ath10k_ce_cancel_send_next(struct ce_state *ce_state, +int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, + dma_addr_t *bufferp, unsigned int *nbytesp, unsigned int *transfer_idp); -void ath10k_ce_deinit(struct ce_state *ce_state); - /*==================CE Interrupt Handlers====================*/ void ath10k_ce_per_engine_service_any(struct ath10k *ar); void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); +void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id); void ath10k_ce_disable_interrupts(struct ath10k *ar); +void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id); +void ath10k_ce_enable_interrupts(struct ath10k *ar); +void ath10k_ce_dump_registers(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data); + +void ath10k_ce_alloc_rri(struct ath10k *ar); +void ath10k_ce_free_rri(struct ath10k *ar); /* ce_attr.flags values */ /* Use NonSnooping PCIe accesses? */ -#define CE_ATTR_NO_SNOOP 1 +#define CE_ATTR_NO_SNOOP BIT(0) /* Byte swap data words */ -#define CE_ATTR_BYTE_SWAP_DATA 2 +#define CE_ATTR_BYTE_SWAP_DATA BIT(1) /* Swizzle descriptors? */ -#define CE_ATTR_SWIZZLE_DESCRIPTORS 4 +#define CE_ATTR_SWIZZLE_DESCRIPTORS BIT(2) /* no interrupt on copy completion */ -#define CE_ATTR_DIS_INTR 8 +#define CE_ATTR_DIS_INTR BIT(3) + +/* no interrupt, only polling */ +#define CE_ATTR_POLL BIT(4) /* Attributes of an instance of a Copy Engine */ struct ce_attr { /* CE_ATTR_* values */ unsigned int flags; - /* currently not in use */ - unsigned int priority; - /* #entries in source ring - Must be a power of 2 */ unsigned int src_nentries; @@ -337,157 +298,51 @@ struct ce_attr { /* #entries in destination ring - Must be a power of 2 */ unsigned int dest_nentries; - /* Future use */ - void *reserved; + void (*send_cb)(struct ath10k_ce_pipe *); + void (*recv_cb)(struct ath10k_ce_pipe *); }; -/* - * When using sendlist_send to transfer multiple buffer fragments, the - * transfer context of each fragment, except last one, will be filled - * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for - * each fragment done with send and the transfer context would be - * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the - * status of a send completion. 
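With the completion callbacks folded into struct ce_attr, a host CE configuration entry now declares its ring sizes and handlers in one place instead of registering callbacks after init. An illustrative entry in the style of the driver's host CE configuration tables — the sizes and the callback name are examples, not values from this patch:

/* e.g. a host->target HTT TX pipe */
static const struct ce_attr example_htt_tx_attr = {
	.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
	.src_sz_max = 256,
	.dest_nentries = 0,			/* send-only pipe */
	.send_cb = example_htt_tx_cb,		/* hypothetical handler */
};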
- */ -#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef) - -#define SR_BA_ADDRESS 0x0000 -#define SR_SIZE_ADDRESS 0x0004 -#define DR_BA_ADDRESS 0x0008 -#define DR_SIZE_ADDRESS 0x000c -#define CE_CMD_ADDRESS 0x0018 - -#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17 -#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17 -#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000 -#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \ - (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \ - CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) - -#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16 -#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16 -#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000 -#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \ - (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \ - CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) -#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \ - (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \ - CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) - -#define CE_CTRL1_DMAX_LENGTH_MSB 15 -#define CE_CTRL1_DMAX_LENGTH_LSB 0 -#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff -#define CE_CTRL1_DMAX_LENGTH_GET(x) \ - (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB) -#define CE_CTRL1_DMAX_LENGTH_SET(x) \ - (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK) - -#define CE_CTRL1_ADDRESS 0x0010 -#define CE_CTRL1_HW_MASK 0x0007ffff -#define CE_CTRL1_SW_MASK 0x0007ffff -#define CE_CTRL1_HW_WRITE_MASK 0x00000000 -#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff -#define CE_CTRL1_RSTMASK 0xffffffff -#define CE_CTRL1_RESET 0x00000080 - -#define CE_CMD_HALT_STATUS_MSB 3 -#define CE_CMD_HALT_STATUS_LSB 3 -#define CE_CMD_HALT_STATUS_MASK 0x00000008 -#define CE_CMD_HALT_STATUS_GET(x) \ - (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB) -#define CE_CMD_HALT_STATUS_SET(x) \ - (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK) -#define CE_CMD_HALT_STATUS_RESET 0 -#define CE_CMD_HALT_MSB 0 -#define CE_CMD_HALT_MASK 0x00000001 - -#define HOST_IE_COPY_COMPLETE_MSB 0 -#define HOST_IE_COPY_COMPLETE_LSB 0 -#define HOST_IE_COPY_COMPLETE_MASK 0x00000001 -#define HOST_IE_COPY_COMPLETE_GET(x) \ - (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB) -#define HOST_IE_COPY_COMPLETE_SET(x) \ - (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK) -#define HOST_IE_COPY_COMPLETE_RESET 0 -#define HOST_IE_ADDRESS 0x002c - -#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010 -#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008 -#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004 -#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 -#define HOST_IS_COPY_COMPLETE_MASK 0x00000001 -#define HOST_IS_ADDRESS 0x0030 - -#define MISC_IE_ADDRESS 0x0034 - -#define MISC_IS_AXI_ERR_MASK 0x00000400 - -#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200 -#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100 -#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080 -#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040 -#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020 - -#define MISC_IS_ADDRESS 0x0038 - -#define SR_WR_INDEX_ADDRESS 0x003c - -#define DST_WR_INDEX_ADDRESS 0x0040 - -#define CURRENT_SRRI_ADDRESS 0x0044 - -#define CURRENT_DRRI_ADDRESS 0x0048 - -#define SRC_WATERMARK_LOW_MSB 31 -#define SRC_WATERMARK_LOW_LSB 16 -#define SRC_WATERMARK_LOW_MASK 0xffff0000 -#define SRC_WATERMARK_LOW_GET(x) \ - (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB) -#define SRC_WATERMARK_LOW_SET(x) \ - (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK) -#define SRC_WATERMARK_LOW_RESET 0 
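The register constants deleted here all follow one idiom: an _LSB shift, a _MASK, and GET/SET wrappers that extract or place a bit-field (the per-chip values now live behind ar->hw_ce_regs tables instead of fixed defines). A standalone demonstration of the pattern using the DMAX length field, with the redundant "0 |" of the originals dropped:

#include <stdio.h>
#include <stdint.h>

#define CE_CTRL1_DMAX_LENGTH_LSB	0
#define CE_CTRL1_DMAX_LENGTH_MASK	0x0000ffff
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
	(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
	(((x) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)

int main(void)
{
	uint32_t ctrl1 = 0;

	ctrl1 |= CE_CTRL1_DMAX_LENGTH_SET(2048);	/* program max copy length */
	printf("dmax = %u\n", CE_CTRL1_DMAX_LENGTH_GET(ctrl1));	/* prints 2048 */
	return 0;
}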
-#define SRC_WATERMARK_HIGH_MSB 15 -#define SRC_WATERMARK_HIGH_LSB 0 -#define SRC_WATERMARK_HIGH_MASK 0x0000ffff -#define SRC_WATERMARK_HIGH_GET(x) \ - (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB) -#define SRC_WATERMARK_HIGH_SET(x) \ - (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK) -#define SRC_WATERMARK_HIGH_RESET 0 -#define SRC_WATERMARK_ADDRESS 0x004c - -#define DST_WATERMARK_LOW_LSB 16 -#define DST_WATERMARK_LOW_MASK 0xffff0000 -#define DST_WATERMARK_LOW_SET(x) \ - (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK) -#define DST_WATERMARK_LOW_RESET 0 -#define DST_WATERMARK_HIGH_MSB 15 -#define DST_WATERMARK_HIGH_LSB 0 -#define DST_WATERMARK_HIGH_MASK 0x0000ffff -#define DST_WATERMARK_HIGH_GET(x) \ - (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB) -#define DST_WATERMARK_HIGH_SET(x) \ - (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK) -#define DST_WATERMARK_HIGH_RESET 0 -#define DST_WATERMARK_ADDRESS 0x0050 - - -static inline u32 ath10k_ce_base_address(unsigned int ce_id) +struct ath10k_ce_ops { + struct ath10k_ce_ring *(*ce_alloc_src_ring)(struct ath10k *ar, + u32 ce_id, + const struct ce_attr *attr); + struct ath10k_ce_ring *(*ce_alloc_dst_ring)(struct ath10k *ar, + u32 ce_id, + const struct ce_attr *attr); + int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr); + int (*ce_completed_recv_next_nolock)(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + u32 *nbytesp); + int (*ce_revoke_recv_next)(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + dma_addr_t *nbytesp); + void (*ce_extract_desc_data)(struct ath10k *ar, + struct ath10k_ce_ring *src_ring, + u32 sw_index, dma_addr_t *bufferp, + u32 *nbytesp, u32 *transfer_idp); + void (*ce_free_pipe)(struct ath10k *ar, int ce_id); + int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe, + void *per_transfer_context, + dma_addr_t buffer, u32 nbytes, + u32 transfer_id, u32 flags); + void (*ce_set_src_ring_base_addr_hi)(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr); + void (*ce_set_dest_ring_base_addr_hi)(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr); + int (*ce_completed_send_next_nolock)(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp); +}; + +static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) { return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; } -#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \ - HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \ - HOST_IS_DST_RING_LOW_WATERMARK_MASK | \ - HOST_IS_DST_RING_HIGH_WATERMARK_MASK) - -#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \ - MISC_IS_DST_ADDR_ERR_MASK | \ - MISC_IS_SRC_LEN_ERR_MASK | \ - MISC_IS_DST_MAX_LEN_VIO_MASK | \ - MISC_IS_DST_RING_OVERFLOW_MASK | \ - MISC_IS_SRC_RING_OVERFLOW_MASK) +#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) (((COPY_ENGINE_BASE_ADDRESS) \ + - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS)) #define CE_SRC_RING_TO_DESC(baddr, idx) \ (&(((struct ce_desc *)baddr)[idx])) @@ -495,22 +350,78 @@ static inline u32 ath10k_ce_base_address(unsigned int ce_id) #define CE_DEST_RING_TO_DESC(baddr, idx) \ (&(((struct ce_desc *)baddr)[idx])) +#define CE_SRC_RING_TO_DESC_64(baddr, idx) \ + (&(((struct ce_desc_64 *)baddr)[idx])) + +#define CE_DEST_RING_TO_DESC_64(baddr, idx) \ + (&(((struct ce_desc_64 *)baddr)[idx])) + /* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). 
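The ring-arithmetic macros nearby rely on nentries being a power of two, so "& (nentries - 1)" implements the modulus without a division, and the signed subtraction in CE_RING_DELTA stays correct even after the write index wraps past the read index. A standalone check:

#include <stdio.h>

#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx) - (int)(fromidx)) & (nentries_mask))
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))

int main(void)
{
	unsigned int nentries_mask = 8 - 1;	/* ring with 8 entries */

	/* write index wrapped: 6 -> 7 -> 0 -> 1 -> 2, so 4 entries in flight */
	int in_flight = CE_RING_DELTA(nentries_mask, 6, 2);
	unsigned int next = CE_RING_IDX_INCR(nentries_mask, 7);

	printf("in flight: %d, next idx: %u\n", in_flight, next);	/* 4, 0 */
	return 0;
}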
*/ #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \ - (((int)(toidx)-(int)(fromidx)) & (nentries_mask)) + (((int)(toidx) - (int)(fromidx)) & (nentries_mask)) #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) +#define CE_RING_IDX_ADD(nentries_mask, idx, num) \ + (((idx) + (num)) & (nentries_mask)) -#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 -#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ + ar->regs->ce_wrap_intr_sum_host_msi_lsb +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \ + ar->regs->ce_wrap_intr_sum_host_msi_mask #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \ (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 -#define CE_INTERRUPT_SUMMARY(ar) \ - CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \ - ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \ - CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)) +static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( + ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)); +} + +/* Host software's Copy Engine configuration. */ +#define CE_ATTR_FLAGS 0 + +/* + * Configuration information for a Copy Engine pipe. + * Passed from Host to Target during startup (one per CE). + * + * NOTE: Structure is shared between Host software and Target firmware! + */ +struct ce_pipe_config { + __le32 pipenum; + __le32 pipedir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; + __le32 reserved; +}; + +/* + * Directions for interconnect pipe configuration. + * These definitions may be used during configuration and are shared + * between Host and Target. + * + * Pipe Directions are relative to the Host, so PIPEDIR_IN means + * "coming IN over air through Target to Host" as with a WiFi Rx operation. + * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air" + * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man" + * Target since things that are "PIPEDIR_OUT" are coming IN to the Target + * over the interconnect. + */ +#define PIPEDIR_NONE 0 +#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */ +#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */ +#define PIPEDIR_INOUT 3 /* bidirectional */ + +/* Establish a mapping between a service/direction and a pipe. */ +struct ce_service_to_pipe { + __le32 service_id; + __le32 pipedir; + __le32 pipenum; +}; #endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 2b3426b1ff3f..7c2939cbde5f 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1,22 +1,21 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include <linux/export.h> #include <linux/module.h> #include <linux/firmware.h> +#include <linux/of.h> +#include <linux/property.h> +#include <linux/dmi.h> +#include <linux/ctype.h> +#include <linux/pm_qos.h> +#include <linux/nvmem-consumer.h> +#include <asm/byteorder.h> #include "core.h" #include "mac.h" @@ -26,99 +25,843 @@ #include "bmi.h" #include "debug.h" #include "htt.h" +#include "testmode.h" +#include "wmi-ops.h" +#include "coredump.h" +#include "leds.h" unsigned int ath10k_debug_mask; +EXPORT_SYMBOL(ath10k_debug_mask); + +static unsigned int ath10k_cryptmode_param; static bool uart_print; -static unsigned int ath10k_p2p; +static bool skip_otp; +static bool fw_diag_log; + +/* frame mode values are mapped as per enum ath10k_hw_txrx_mode */ +unsigned int ath10k_frame_mode = ATH10K_HW_TXRX_NATIVE_WIFI; + +unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) | + BIT(ATH10K_FW_CRASH_DUMP_CE_DATA); + +/* FIXME: most of these should be readonly */ module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); +module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644); module_param(uart_print, bool, 0644); -module_param_named(p2p, ath10k_p2p, uint, 0644); +module_param(skip_otp, bool, 0644); +module_param(fw_diag_log, bool, 0644); +module_param_named(frame_mode, ath10k_frame_mode, uint, 0644); +module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444); + MODULE_PARM_DESC(debug_mask, "Debugging mask"); MODULE_PARM_DESC(uart_print, "Uart target debugging"); -MODULE_PARM_DESC(p2p, "Enable ath10k P2P support"); +MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); +MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software"); +MODULE_PARM_DESC(frame_mode, + "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)"); +MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file"); +MODULE_PARM_DESC(fw_diag_log, "Diag based fw log debugging"); static const struct ath10k_hw_params ath10k_hw_params_list[] = { { - .id = QCA988X_HW_1_0_VERSION, - .name = "qca988x hw1.0", - .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR, + .id = QCA988X_HW_2_0_VERSION, + .dev_id = QCA988X_2_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca988x hw2.0", + .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .led_pin = 1, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 2116, .fw = { - .dir = QCA988X_HW_1_0_FW_DIR, - .fw = QCA988X_HW_1_0_FW_FILE, - .otp = QCA988X_HW_1_0_OTP_FILE, - .board = QCA988X_HW_1_0_BOARD_DATA_FILE, + .dir = QCA988X_HW_2_0_FW_DIR, + .board_size = QCA988X_BOARD_DATA_SZ, + .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, }, + .rx_desc_ops = &qca988x_rx_desc_ops, + .hw_ops = &qca988x_ops, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + 
.n_cipher_suites = 8, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = true, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, }, { .id = QCA988X_HW_2_0_VERSION, - .name = "qca988x hw2.0", + .dev_id = QCA988X_2_0_DEVICE_ID_UBNT, + .name = "qca988x hw2.0 ubiquiti", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .led_pin = 0, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 2116, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, - .fw = QCA988X_HW_2_0_FW_FILE, - .otp = QCA988X_HW_2_0_OTP_FILE, - .board = QCA988X_HW_2_0_BOARD_DATA_FILE, + .board_size = QCA988X_BOARD_DATA_SZ, + .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, + }, + .rx_desc_ops = &qca988x_rx_desc_ops, + .hw_ops = &qca988x_ops, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 8, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = true, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA9887_HW_1_0_VERSION, + .dev_id = QCA9887_1_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca9887 hw1.0", + .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .led_pin = 1, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 2116, + .fw = { + .dir = QCA9887_HW_1_0_FW_DIR, + .board_size = QCA9887_BOARD_DATA_SZ, + .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ, + }, + .rx_desc_ops = &qca988x_rx_desc_ops, + .hw_ops = &qca988x_ops, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 8, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA6174_HW_3_2_VERSION, + .dev_id = QCA6174_3_2_DEVICE_ID, + .bus = ATH10K_BUS_SDIO, + .name = "qca6174 hw3.2 sdio", + .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, + .uart_pin = 19, + .led_pin = 0, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 0, + .fw = { + .dir = QCA6174_HW_3_0_FW_DIR, + .board_size = QCA6174_BOARD_DATA_SZ, + .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, + }, + .rx_desc_ops 
= &qca988x_rx_desc_ops, + .hw_ops = &qca6174_sdio_ops, + .hw_clk = qca6174_clk, + .target_cpu_freq = 176000000, + .decap_align_bytes = 4, + .n_cipher_suites = 8, + .num_peers = 10, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .uart_pin_workaround = true, + .tx_stats_over_pktlog = false, + .credit_size_workaround = false, + .bmi_large_size_download = true, + .supports_peer_stats_info = true, + .dynamic_sar_support = true, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA6174_HW_2_1_VERSION, + .dev_id = QCA6164_2_1_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca6164 hw2.1", + .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, + .uart_pin = 6, + .led_pin = 0, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, + .fw = { + .dir = QCA6174_HW_2_1_FW_DIR, + .board_size = QCA6174_BOARD_DATA_SZ, + .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, + }, + .rx_desc_ops = &qca988x_rx_desc_ops, + .hw_ops = &qca988x_ops, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 8, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA6174_HW_2_1_VERSION, + .dev_id = QCA6174_2_1_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca6174 hw2.1", + .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, + .uart_pin = 6, + .led_pin = 0, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, + .fw = { + .dir = QCA6174_HW_2_1_FW_DIR, + .board_size = QCA6174_BOARD_DATA_SZ, + .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, + }, + .rx_desc_ops = &qca988x_rx_desc_ops, + .hw_ops = &qca988x_ops, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 8, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA6174_HW_3_0_VERSION, + .dev_id = QCA6174_2_1_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca6174 hw3.0", + .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, + .uart_pin = 6, + .led_pin = 0, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, + .fw = { + .dir = QCA6174_HW_3_0_FW_DIR, + .board_size = QCA6174_BOARD_DATA_SZ, + .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, + }, + .rx_desc_ops = &qca988x_rx_desc_ops, + .hw_ops = &qca988x_ops, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 
0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 8, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA6174_HW_3_2_VERSION, + .dev_id = QCA6174_2_1_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca6174 hw3.2", + .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, + .uart_pin = 6, + .led_pin = 0, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, + .fw = { + /* uses same binaries as hw3.0 */ + .dir = QCA6174_HW_3_0_FW_DIR, + .board_size = QCA6174_BOARD_DATA_SZ, + .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, + }, + .rx_desc_ops = &qca988x_rx_desc_ops, + .hw_ops = &qca6174_ops, + .hw_clk = qca6174_clk, + .target_cpu_freq = 176000000, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 8, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = true, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .supports_peer_stats_info = true, + .dynamic_sar_support = true, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = true, + }, + { + .id = QCA99X0_HW_2_0_DEV_VERSION, + .dev_id = QCA99X0_2_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca99x0 hw2.0", + .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .led_pin = 17, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, + .channel_counters_freq_hz = 150000, + .max_probe_resp_desc_thres = 24, + .tx_chain_mask = 0xf, + .rx_chain_mask = 0xf, + .max_spatial_stream = 4, + .cal_data_len = 12064, + .fw = { + .dir = QCA99X0_HW_2_0_FW_DIR, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + .sw_decrypt_mcast_mgmt = true, + .rx_desc_ops = &qca99x0_rx_desc_ops, + .hw_ops = &qca99x0_ops, + .decap_align_bytes = 1, + .spectral_bin_discard = 4, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 11, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA9984_HW_1_0_DEV_VERSION, + .dev_id = QCA9984_1_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca9984/qca9994 hw1.0", + .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .led_pin = 17, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + 
.cck_rate_map_rev2 = true, + .channel_counters_freq_hz = 150000, + .max_probe_resp_desc_thres = 24, + .tx_chain_mask = 0xf, + .rx_chain_mask = 0xf, + .max_spatial_stream = 4, + .cal_data_len = 12064, + .fw = { + .dir = QCA9984_HW_1_0_FW_DIR, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + .ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ, }, + .sw_decrypt_mcast_mgmt = true, + .rx_desc_ops = &qca99x0_rx_desc_ops, + .hw_ops = &qca99x0_ops, + .decap_align_bytes = 1, + .spectral_bin_discard = 12, + .spectral_bin_offset = 8, + + /* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz + * or 2x2 160Mhz, long-guard-interval. + */ + .vht160_mcs_rx_highest = 1560, + .vht160_mcs_tx_highest = 1560, + .n_cipher_suites = 11, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA9888_HW_2_0_DEV_VERSION, + .dev_id = QCA9888_2_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca9888 hw2.0", + .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .led_pin = 17, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .channel_counters_freq_hz = 150000, + .max_probe_resp_desc_thres = 24, + .tx_chain_mask = 3, + .rx_chain_mask = 3, + .max_spatial_stream = 2, + .cal_data_len = 12064, + .fw = { + .dir = QCA9888_HW_2_0_FW_DIR, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + .sw_decrypt_mcast_mgmt = true, + .rx_desc_ops = &qca99x0_rx_desc_ops, + .hw_ops = &qca99x0_ops, + .decap_align_bytes = 1, + .spectral_bin_discard = 12, + .spectral_bin_offset = 8, + + /* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or + * 1x1 160Mhz, long-guard-interval. 
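Every entry in this table is keyed by the (id, dev_id, bus) triple, which is how one chip generation can carry several tunings (for instance the PCI and SDIO flavors of QCA6174 above). A sketch of the match the core performs at probe, modeled on the ath10k_init_hw_params() lookup later in this file; treat the exact conditions as an approximation:

const struct ath10k_hw_params *hw_params = NULL;
int i;

for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
	if (ath10k_hw_params_list[i].bus == ar->hif.bus &&
	    ath10k_hw_params_list[i].id == ar->target_version &&
	    ath10k_hw_params_list[i].dev_id == ar->dev_id) {
		hw_params = &ath10k_hw_params_list[i];
		break;
	}
}

if (!hw_params)
	return -EINVAL;	/* chip/bus combination not supported */

ar->hw_params = *hw_params;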
+ */ + .vht160_mcs_rx_highest = 780, + .vht160_mcs_tx_highest = 780, + .n_cipher_suites = 11, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA9377_HW_1_0_DEV_VERSION, + .dev_id = QCA9377_1_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca9377 hw1.0", + .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 6, + .led_pin = 0, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, + .fw = { + .dir = QCA9377_HW_1_0_FW_DIR, + .board_size = QCA9377_BOARD_DATA_SZ, + .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, + }, + .rx_desc_ops = &qca988x_rx_desc_ops, + .hw_ops = &qca988x_ops, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 8, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA9377_HW_1_1_DEV_VERSION, + .dev_id = QCA9377_1_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, + .name = "qca9377 hw1.1", + .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 6, + .led_pin = 0, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, + .fw = { + .dir = QCA9377_HW_1_0_FW_DIR, + .board_size = QCA9377_BOARD_DATA_SZ, + .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, + }, + .rx_desc_ops = &qca988x_rx_desc_ops, + .hw_ops = &qca6174_ops, + .hw_clk = qca6174_clk, + .target_cpu_freq = 176000000, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 8, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = true, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA9377_HW_1_1_DEV_VERSION, + .dev_id = QCA9377_1_0_DEVICE_ID, + .bus = ATH10K_BUS_SDIO, + .name = "qca9377 hw1.1 sdio", + .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 19, + .led_pin = 0, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 8124, + .fw = { + .dir = QCA9377_HW_1_0_FW_DIR, + .board_size = QCA9377_BOARD_DATA_SZ, + .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, + }, + .rx_desc_ops = &qca988x_rx_desc_ops, + .hw_ops = &qca6174_ops, + .hw_clk = qca6174_clk, + 
.target_cpu_freq = 176000000, + .decap_align_bytes = 4, + .n_cipher_suites = 8, + .num_peers = TARGET_QCA9377_HL_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .uart_pin_workaround = true, + .credit_size_workaround = true, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = QCA4019_HW_1_0_DEV_VERSION, + .dev_id = 0, + .bus = ATH10K_BUS_AHB, + .name = "qca4019 hw1.0", + .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .led_pin = 0, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, + .otp_exe_param = 0x0010000, + .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, + .channel_counters_freq_hz = 125000, + .max_probe_resp_desc_thres = 24, + .tx_chain_mask = 0x3, + .rx_chain_mask = 0x3, + .max_spatial_stream = 2, + .cal_data_len = 12064, + .fw = { + .dir = QCA4019_HW_1_0_FW_DIR, + .board_size = QCA4019_BOARD_DATA_SZ, + .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ, + }, + .sw_decrypt_mcast_mgmt = true, + .rx_desc_ops = &qca99x0_rx_desc_ops, + .hw_ops = &qca99x0_ops, + .decap_align_bytes = 1, + .spectral_bin_discard = 4, + .spectral_bin_offset = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 11, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = false, + .hw_restart_disconnect = false, + .use_fw_tx_credits = true, + .delay_unmap_buffer = false, + .mcast_frame_registration = false, + }, + { + .id = WCN3990_HW_1_0_DEV_VERSION, + .dev_id = 0, + .bus = ATH10K_BUS_SNOC, + .name = "wcn3990 hw1.0", + .led_pin = 0, + .continuous_frag_desc = true, + .tx_chain_mask = 0x7, + .rx_chain_mask = 0x7, + .max_spatial_stream = 4, + .fw = { + .dir = WCN3990_HW_1_0_FW_DIR, + .board_size = WCN3990_BOARD_DATA_SZ, + .board_ext_size = WCN3990_BOARD_EXT_DATA_SZ, + }, + .sw_decrypt_mcast_mgmt = true, + .rx_desc_ops = &wcn3990_rx_desc_ops, + .hw_ops = &wcn3990_ops, + .decap_align_bytes = 1, + .num_peers = TARGET_HL_TLV_NUM_PEERS, + .n_cipher_suites = 11, + .ast_skid_limit = TARGET_HL_TLV_AST_SKID_LIMIT, + .num_wds_entries = TARGET_HL_TLV_NUM_WDS_ENTRIES, + .target_64bit = true, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC, + .shadow_reg_support = true, + .rri_on_ddr = true, + .hw_filter_reset_required = false, + .fw_diag_ce_download = false, + .credit_size_workaround = false, + .tx_stats_over_pktlog = false, + .dynamic_sar_support = true, + .hw_restart_disconnect = true, + .use_fw_tx_credits = false, + .delay_unmap_buffer = true, + .mcast_frame_registration = false, }, }; -static void ath10k_send_suspend_complete(struct ath10k *ar) +static const char *const ath10k_core_fw_feature_str[] = { + [ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx", + [ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x", + [ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx", + [ATH10K_FW_FEATURE_NO_P2P] = "no-p2p", + [ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2", + [ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps", + [ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan", + [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp", + [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad", + [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = 
"skip-clock-init", + [ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode", + [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", + [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", + [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl", + [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param", + [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war", + [ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast", + [ATH10K_FW_FEATURE_NO_PS] = "no-ps", + [ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference", + [ATH10K_FW_FEATURE_NON_BMI] = "non-bmi", + [ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL] = "single-chan-info-per-channel", + [ATH10K_FW_FEATURE_PEER_FIXED_RATE] = "peer-fixed-rate", + [ATH10K_FW_FEATURE_IRAM_RECOVERY] = "iram-recovery", +}; + +static unsigned int ath10k_core_get_fw_feature_str(char *buf, + size_t buf_len, + enum ath10k_fw_features feat) { - ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__); + /* make sure that ath10k_core_fw_feature_str[] gets updated */ + BUILD_BUG_ON(ARRAY_SIZE(ath10k_core_fw_feature_str) != + ATH10K_FW_FEATURE_COUNT); + + if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) || + WARN_ON(!ath10k_core_fw_feature_str[feat])) { + return scnprintf(buf, buf_len, "bit%d", feat); + } - ar->is_target_paused = true; - wake_up(&ar->event_queue); + return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]); } -static int ath10k_check_fw_version(struct ath10k *ar) +void ath10k_core_get_fw_features_str(struct ath10k *ar, + char *buf, + size_t buf_len) { - char version[32]; + size_t len = 0; + int i; - if (ar->fw_version_major >= SUPPORTED_FW_MAJOR && - ar->fw_version_minor >= SUPPORTED_FW_MINOR && - ar->fw_version_release >= SUPPORTED_FW_RELEASE && - ar->fw_version_build >= SUPPORTED_FW_BUILD) - return 0; + for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { + if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) { + if (len > 0) + len += scnprintf(buf + len, buf_len - len, ","); - snprintf(version, sizeof(version), "%u.%u.%u.%u", - SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR, - SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD); + len += ath10k_core_get_fw_feature_str(buf + len, + buf_len - len, + i); + } + } +} - ath10k_warn("WARNING: Firmware version %s is not officially supported.\n", - ar->hw->wiphy->fw_version); - ath10k_warn("Please upgrade to version %s (or newer)\n", version); +static void ath10k_send_suspend_complete(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n"); - return 0; + complete(&ar->target_suspend); } -static int ath10k_init_connect_htc(struct ath10k *ar) +static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode) { - int status; + bool mtu_workaround = ar->hw_params.credit_size_workaround; + int ret; + u32 param = 0; - status = ath10k_wmi_connect_htc_service(ar); - if (status) - goto conn_fail; + ret = ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256); + if (ret) + return ret; - /* Start HTC */ - status = ath10k_htc_start(ar->htc); - if (status) - goto conn_fail; + ret = ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99); + if (ret) + return ret; - /* Wait for WMI event to be ready */ - status = ath10k_wmi_wait_for_service_ready(ar); - if (status <= 0) { - ath10k_warn("wmi service ready event not received"); - status = -ETIMEDOUT; - goto timeout; - } + ret = ath10k_bmi_read32(ar, hi_acs_flags, ¶m); + if (ret) + return ret; - ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n"); - return 0; + param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET; -timeout: - ath10k_htc_stop(ar->htc); -conn_fail: - 
return status; + if (mode == ATH10K_FIRMWARE_MODE_NORMAL && !mtu_workaround) + param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; + else + param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; + + if (mode == ATH10K_FIRMWARE_MODE_UTF) + param &= ~HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET; + else + param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET; + + ret = ath10k_bmi_write32(ar, hi_acs_flags, param); + if (ret) + return ret; + + ret = ath10k_bmi_read32(ar, hi_option_flag2, ¶m); + if (ret) + return ret; + + param |= HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_HOST; + + ret = ath10k_bmi_write32(ar, hi_option_flag2, param); + if (ret) + return ret; + + return 0; } static int ath10k_init_configure_target(struct ath10k *ar) @@ -130,14 +873,14 @@ static int ath10k_init_configure_target(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_app_host_interest, HTC_PROTOCOL_VERSION); if (ret) { - ath10k_err("settings HTC version failed\n"); + ath10k_err(ar, "settings HTC version failed\n"); return ret; } /* set the firmware mode to STA/IBSS/AP */ ret = ath10k_bmi_read32(ar, hi_option_flag, ¶m_host); if (ret) { - ath10k_err("setting firmware mode (1/2) failed\n"); + ath10k_err(ar, "setting firmware mode (1/2) failed\n"); return ret; } @@ -156,14 +899,14 @@ static int ath10k_init_configure_target(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_option_flag, param_host); if (ret) { - ath10k_err("setting firmware mode (2/2) failed\n"); + ath10k_err(ar, "setting firmware mode (2/2) failed\n"); return ret; } /* We do all byte-swapping on the host */ ret = ath10k_bmi_write32(ar, hi_be, 0); if (ret) { - ath10k_err("setting host CPU BE mode failed\n"); + ath10k_err(ar, "setting host CPU BE mode failed\n"); return ret; } @@ -171,7 +914,18 @@ static int ath10k_init_configure_target(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_fw_swap, 0); if (ret) { - ath10k_err("setting FW data/desc swap flags failed\n"); + ath10k_err(ar, "setting FW data/desc swap flags failed\n"); + return ret; + } + + /* Some devices have a special sanity check that verifies the PCI + * Device ID is written to this host interest var. It is known to be + * required to boot QCA6164. 
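Both ath10k_init_sdio() above and ath10k_init_configure_target() below are built from the same BMI host-interest idiom: read a 32-bit variable from the target, adjust flag bits, write it back, checking every step since BMI runs over the bus before the firmware is up. The skeleton, condensed from the hi_acs_flags handling in the SDIO path:

u32 param;
int ret;

ret = ath10k_bmi_read32(ar, hi_acs_flags, &param);
if (ret)
	return ret;

param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;	/* set/clear mode bits here */

ret = ath10k_bmi_write32(ar, hi_acs_flags, param);
if (ret)
	return ret;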
+ */ + ret = ath10k_bmi_write32(ar, hi_hci_uart_pwr_mgmt_params_ext, + ar->dev_id); + if (ret) { + ath10k_err(ar, "failed to set pwr_mgmt_params: %d\n", ret); return ret; } @@ -192,191 +946,1470 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, if (dir == NULL) dir = "."; + if (ar->board_name) { + snprintf(filename, sizeof(filename), "%s/%s/%s", + dir, ar->board_name, file); + ret = firmware_request_nowarn(&fw, filename, ar->dev); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", + filename, ret); + if (!ret) + return fw; + } + snprintf(filename, sizeof(filename), "%s/%s", dir, file); - ret = request_firmware(&fw, filename, ar->dev); + ret = firmware_request_nowarn(&fw, filename, ar->dev); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", + filename, ret); if (ret) return ERR_PTR(ret); return fw; } -static int ath10k_push_board_ext_data(struct ath10k *ar, - const struct firmware *fw) +static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data, + size_t data_len) { - u32 board_data_size = QCA988X_BOARD_DATA_SZ; - u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ; + u32 board_data_size = ar->hw_params.fw.board_size; + u32 board_ext_data_size = ar->hw_params.fw.board_ext_size; u32 board_ext_data_addr; int ret; ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr); if (ret) { - ath10k_err("could not read board ext data addr (%d)\n", ret); + ath10k_err(ar, "could not read board ext data addr (%d)\n", + ret); return ret; } - ath10k_dbg(ATH10K_DBG_CORE, - "ath10k: Board extended Data download addr: 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot push board extended data addr 0x%x\n", board_ext_data_addr); if (board_ext_data_addr == 0) return 0; - if (fw->size != (board_data_size + board_ext_data_size)) { - ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n", - fw->size, board_data_size, board_ext_data_size); + if (data_len != (board_data_size + board_ext_data_size)) { + ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n", + data_len, board_data_size, board_ext_data_size); return -EINVAL; } ret = ath10k_bmi_write_memory(ar, board_ext_data_addr, - fw->data + board_data_size, + data + board_data_size, board_ext_data_size); if (ret) { - ath10k_err("could not write board ext data (%d)\n", ret); + ath10k_err(ar, "could not write board ext data (%d)\n", ret); return ret; } ret = ath10k_bmi_write32(ar, hi_board_ext_data_config, (board_ext_data_size << 16) | 1); if (ret) { - ath10k_err("could not write board ext data bit (%d)\n", ret); + ath10k_err(ar, "could not write board ext data bit (%d)\n", + ret); return ret; } return 0; } -static int ath10k_download_board_data(struct ath10k *ar) +static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) +{ + u32 result, address; + u8 board_id, chip_id; + bool ext_bid_support; + int ret, bmi_board_id_param; + + address = ar->hw_params.patch_load_addr; + + if (!ar->normal_mode_fw.fw_file.otp_data || + !ar->normal_mode_fw.fw_file.otp_len) { + ath10k_warn(ar, + "failed to retrieve board id because of invalid otp\n"); + return -ENODATA; + } + + if (ar->id.bmi_ids_valid) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot already acquired valid otp board id,skip download, board_id %d chip_id %d\n", + ar->id.bmi_board_id, ar->id.bmi_chip_id); + goto skip_otp_download; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot upload otp to 0x%x len %zd for board id\n", + address, ar->normal_mode_fw.fw_file.otp_len); + + ret = ath10k_bmi_fast_download(ar, address, + 
ar->normal_mode_fw.fw_file.otp_data, + ar->normal_mode_fw.fw_file.otp_len); + if (ret) { + ath10k_err(ar, "could not write otp for board id check: %d\n", + ret); + return ret; + } + + if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || + ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE || + ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM) + bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID; + else + bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID; + + ret = ath10k_bmi_execute(ar, address, bmi_board_id_param, &result); + if (ret) { + ath10k_err(ar, "could not execute otp for board id check: %d\n", + ret); + return ret; + } + + board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP); + chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP); + ext_bid_support = (result & ATH10K_BMI_EXT_BOARD_ID_SUPPORT); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot get otp board id result 0x%08x board_id %d chip_id %d ext_bid_support %d\n", + result, board_id, chip_id, ext_bid_support); + + ar->id.ext_bid_supported = ext_bid_support; + + if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 || + (board_id == 0)) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "board id does not exist in otp, ignore it\n"); + return -EOPNOTSUPP; + } + + ar->id.bmi_ids_valid = true; + ar->id.bmi_board_id = board_id; + ar->id.bmi_chip_id = chip_id; + +skip_otp_download: + + return 0; +} + +static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data) +{ + struct ath10k *ar = data; + const char *bdf_ext; + const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC; + u8 bdf_enabled; + int i; + + if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE) + return; + + if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "wrong smbios bdf ext type length (%d).\n", + hdr->length); + return; + } + + bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET); + if (!bdf_enabled) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n"); + return; + } + + /* Only one string exists (per spec) */ + bdf_ext = (char *)hdr + hdr->length; + + if (memcmp(bdf_ext, magic, strlen(magic)) != 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant magic does not match.\n"); + return; + } + + for (i = 0; i < strlen(bdf_ext); i++) { + if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant name contains non ascii chars.\n"); + return; + } + } + + /* Copy extension name without magic suffix */ + if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic), + sizeof(ar->id.bdf_ext)) < 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", + bdf_ext); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found and validated bdf variant smbios_type 0x%x bdf %s\n", + ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext); +} + +static int ath10k_core_check_smbios(struct ath10k *ar) +{ + ar->id.bdf_ext[0] = '\0'; + dmi_walk(ath10k_core_check_bdfext, ar); + + if (ar->id.bdf_ext[0] == '\0') + return -ENODATA; + + return 0; +} + +int ath10k_core_check_dt(struct ath10k *ar) +{ + struct device_node *node; + const char *variant = NULL; + + node = ar->dev->of_node; + if (!node) + return -ENOENT; + + of_property_read_string(node, "qcom,calibration-variant", + &variant); + if (!variant) + of_property_read_string(node, "qcom,ath10k-calibration-variant", + &variant); + if (!variant) + return -ENODATA; + + if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0) + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", + 
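/* [Editor's note -- illustrative sketch, not part of this patch.]
 * The SMBIOS/DT walks above accept a board-file variant string only if it
 * carries the expected magic prefix and is printable ASCII, then store it
 * with the magic stripped. The validation, modeled standalone (the magic
 * value here is a placeholder, not ATH10K_SMBIOS_BDF_EXT_MAGIC):
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define BDF_MAGIC "BDF_"	/* placeholder magic prefix */

static int parse_bdf_variant(const char *bdf_ext, char *out, size_t out_len)
{
	size_t i, magic_len = strlen(BDF_MAGIC);

	if (strncmp(bdf_ext, BDF_MAGIC, magic_len) != 0)
		return -1;		/* magic does not match */

	for (i = 0; bdf_ext[i]; i++)
		if (!isascii((unsigned char)bdf_ext[i]) ||
		    !isprint((unsigned char)bdf_ext[i]))
			return -1;	/* non-printable character */

	/* copy the variant without the magic prefix, rejecting overflow */
	if (snprintf(out, out_len, "%s", bdf_ext + magic_len) >= (int)out_len)
		return -1;

	return 0;
}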
variant); + + return 0; +} +EXPORT_SYMBOL(ath10k_core_check_dt); + +static int ath10k_download_fw(struct ath10k *ar) +{ + u32 address, data_len; + const void *data; + int ret; + struct pm_qos_request latency_qos = {}; + + address = ar->hw_params.patch_load_addr; + + data = ar->running_fw->fw_file.firmware_data; + data_len = ar->running_fw->fw_file.firmware_len; + + ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file); + if (ret) { + ath10k_err(ar, "failed to configure fw code swap: %d\n", + ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot uploading firmware image %p len %d\n", + data, data_len); + + /* Check if device supports to download firmware via + * diag copy engine. Downloading firmware via diag CE + * greatly reduces the time to download firmware. + */ + if (ar->hw_params.fw_diag_ce_download) { + ret = ath10k_hw_diag_fast_download(ar, address, + data, data_len); + if (ret == 0) + /* firmware upload via diag ce was successful */ + return 0; + + ath10k_warn(ar, + "failed to upload firmware via diag ce, trying BMI: %d", + ret); + } + + cpu_latency_qos_add_request(&latency_qos, 0); + + ret = ath10k_bmi_fast_download(ar, address, data, data_len); + + cpu_latency_qos_remove_request(&latency_qos); + + return ret; +} + +void ath10k_core_free_board_files(struct ath10k *ar) +{ + if (!IS_ERR(ar->normal_mode_fw.board)) + release_firmware(ar->normal_mode_fw.board); + + if (!IS_ERR(ar->normal_mode_fw.ext_board)) + release_firmware(ar->normal_mode_fw.ext_board); + + ar->normal_mode_fw.board = NULL; + ar->normal_mode_fw.board_data = NULL; + ar->normal_mode_fw.board_len = 0; + ar->normal_mode_fw.ext_board = NULL; + ar->normal_mode_fw.ext_board_data = NULL; + ar->normal_mode_fw.ext_board_len = 0; +} +EXPORT_SYMBOL(ath10k_core_free_board_files); + +static void ath10k_core_free_firmware_files(struct ath10k *ar) +{ + if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware)) + release_firmware(ar->normal_mode_fw.fw_file.firmware); + + if (!IS_ERR(ar->cal_file)) + release_firmware(ar->cal_file); + + if (!IS_ERR(ar->pre_cal_file)) + release_firmware(ar->pre_cal_file); + + ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file); + + ar->normal_mode_fw.fw_file.otp_data = NULL; + ar->normal_mode_fw.fw_file.otp_len = 0; + + ar->normal_mode_fw.fw_file.firmware = NULL; + ar->normal_mode_fw.fw_file.firmware_data = NULL; + ar->normal_mode_fw.fw_file.firmware_len = 0; + + ar->cal_file = NULL; + ar->pre_cal_file = NULL; +} + +static int ath10k_fetch_cal_file(struct ath10k *ar) +{ + char filename[100]; + + /* pre-cal-<bus>-<id>.bin */ + scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin", + ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); + + ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename); + if (!IS_ERR(ar->pre_cal_file)) + goto success; + + /* cal-<bus>-<id>.bin */ + scnprintf(filename, sizeof(filename), "cal-%s-%s.bin", + ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); + + ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename); + if (IS_ERR(ar->cal_file)) + /* calibration file is optional, don't print any warnings */ + return PTR_ERR(ar->cal_file); +success: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n", + ATH10K_FW_DIR, filename); + + return 0; +} + +static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type) { - u32 board_data_size = QCA988X_BOARD_DATA_SZ; - u32 address; const struct firmware *fw; + char boardname[100]; + + if (bd_ie_type == ATH10K_BD_IE_BOARD) { + scnprintf(boardname, 
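/* [Editor's note -- illustrative sketch, not part of this patch.]
 * ath10k_fetch_cal_file() above derives per-device calibration file names
 * from the bus string and the device name, preferring a pre-cal-* file
 * over a cal-* file. The naming scheme, modeled standalone:
 */
#include <stdio.h>

static void cal_file_names(const char *bus, const char *dev)
{
	char name[100];

	snprintf(name, sizeof(name), "pre-cal-%s-%s.bin", bus, dev);
	printf("try 1: %s\n", name);	/* e.g. pre-cal-pci-0000:02:00.0.bin */

	snprintf(name, sizeof(name), "cal-%s-%s.bin", bus, dev);
	printf("try 2: %s\n", name);	/* only if the pre-cal file was absent */
}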
sizeof(boardname), "board-%s-%s.bin", + ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); + + ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + boardname); + if (IS_ERR(ar->normal_mode_fw.board)) { + fw = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + ATH10K_BOARD_DATA_FILE); + ar->normal_mode_fw.board = fw; + } + + if (IS_ERR(ar->normal_mode_fw.board)) + return PTR_ERR(ar->normal_mode_fw.board); + + ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data; + ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size; + } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { + fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, + ATH10K_EBOARD_DATA_FILE); + ar->normal_mode_fw.ext_board = fw; + if (IS_ERR(ar->normal_mode_fw.ext_board)) + return PTR_ERR(ar->normal_mode_fw.ext_board); + + ar->normal_mode_fw.ext_board_data = ar->normal_mode_fw.ext_board->data; + ar->normal_mode_fw.ext_board_len = ar->normal_mode_fw.ext_board->size; + } + + return 0; +} + +static int ath10k_core_parse_bd_ie_board(struct ath10k *ar, + const void *buf, size_t buf_len, + const char *boardname, + int bd_ie_type) +{ + const struct ath10k_fw_ie *hdr; + bool name_match_found; + int ret, board_ie_id; + size_t board_ie_len; + const void *board_ie_data; + + name_match_found = false; + + /* go through ATH10K_BD_IE_BOARD_ elements */ + while (buf_len > sizeof(struct ath10k_fw_ie)) { + hdr = buf; + board_ie_id = le32_to_cpu(hdr->id); + board_ie_len = le32_to_cpu(hdr->len); + board_ie_data = hdr->data; + + buf_len -= sizeof(*hdr); + buf += sizeof(*hdr); + + if (buf_len < ALIGN(board_ie_len, 4)) { + ath10k_err(ar, "invalid ATH10K_BD_IE_BOARD length: %zu < %zu\n", + buf_len, ALIGN(board_ie_len, 4)); + ret = -EINVAL; + goto out; + } + + switch (board_ie_id) { + case ATH10K_BD_IE_BOARD_NAME: + ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "board name", "", + board_ie_data, board_ie_len); + + if (board_ie_len != strlen(boardname)) + break; + + ret = memcmp(board_ie_data, boardname, strlen(boardname)); + if (ret) + break; + + name_match_found = true; + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot found match for name '%s'", + boardname); + break; + case ATH10K_BD_IE_BOARD_DATA: + if (!name_match_found) + /* no match found */ + break; + + if (bd_ie_type == ATH10K_BD_IE_BOARD) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot found board data for '%s'", + boardname); + + ar->normal_mode_fw.board_data = board_ie_data; + ar->normal_mode_fw.board_len = board_ie_len; + } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot found eboard data for '%s'", + boardname); + + ar->normal_mode_fw.ext_board_data = board_ie_data; + ar->normal_mode_fw.ext_board_len = board_ie_len; + } + + ret = 0; + goto out; + default: + ath10k_warn(ar, "unknown ATH10K_BD_IE_BOARD found: %d\n", + board_ie_id); + break; + } + + /* jump over the padding */ + board_ie_len = ALIGN(board_ie_len, 4); + + buf_len -= board_ie_len; + buf += board_ie_len; + } + + /* no match found */ + ret = -ENOENT; + +out: + return ret; +} + +static int ath10k_core_search_bd(struct ath10k *ar, + const char *boardname, + const u8 *data, + size_t len) +{ + size_t ie_len; + struct ath10k_fw_ie *hdr; + int ret = -ENOENT, ie_id; + + while (len > sizeof(struct ath10k_fw_ie)) { + hdr = (struct ath10k_fw_ie *)data; + ie_id = le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data = hdr->data; + + if (len < ALIGN(ie_len, 4)) { + ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n", + ie_id, 
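/* [Editor's note -- illustrative sketch, not part of this patch.]
 * board-2.bin, as parsed above, is a flat TLV stream: a little-endian
 * id/len header, a payload, then padding up to a 4-byte boundary. The walk
 * pattern shared by ath10k_core_search_bd() and the IE parsers, reduced to
 * its skeleton (ids/lengths are assumed already in host byte order here):
 */
#include <stddef.h>
#include <stdint.h>

struct fw_ie {			/* mirrors struct ath10k_fw_ie */
	uint32_t id;
	uint32_t len;
	uint8_t data[];
};

#define TLV_ALIGN(x) (((x) + 3u) & ~3u)

static const struct fw_ie *tlv_next(const uint8_t **buf, size_t *len)
{
	const struct fw_ie *hdr;
	size_t padded;

	if (*len <= sizeof(*hdr))
		return NULL;			/* stream exhausted */

	hdr = (const struct fw_ie *)*buf;
	padded = TLV_ALIGN(hdr->len);

	if (*len - sizeof(*hdr) < padded)
		return NULL;			/* truncated element */

	*buf += sizeof(*hdr) + padded;		/* skip payload + padding */
	*len -= sizeof(*hdr) + padded;
	return hdr;				/* caller switches on hdr->id */
}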
ie_len, len); + return -EINVAL; + } + + switch (ie_id) { + case ATH10K_BD_IE_BOARD: + ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, + boardname, + ATH10K_BD_IE_BOARD); + if (ret == -ENOENT) + /* no match found, continue */ + break; + + /* either found or error, so stop searching */ + goto out; + case ATH10K_BD_IE_BOARD_EXT: + ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, + boardname, + ATH10K_BD_IE_BOARD_EXT); + if (ret == -ENOENT) + /* no match found, continue */ + break; + + /* either found or error, so stop searching */ + goto out; + } + + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + + len -= ie_len; + data += ie_len; + } + +out: + /* return result of parse_bd_ie_board() or -ENOENT */ + return ret; +} + +static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, + const char *boardname, + const char *fallback_boardname1, + const char *fallback_boardname2, + const char *filename) +{ + size_t len, magic_len; + const u8 *data; + int ret; + + /* Skip if already fetched during board data download */ + if (!ar->normal_mode_fw.board) + ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + filename); + if (IS_ERR(ar->normal_mode_fw.board)) + return PTR_ERR(ar->normal_mode_fw.board); + + data = ar->normal_mode_fw.board->data; + len = ar->normal_mode_fw.board->size; + + /* magic has extra null byte padded */ + magic_len = strlen(ATH10K_BOARD_MAGIC) + 1; + if (len < magic_len) { + ath10k_err(ar, "failed to find magic value in %s/%s, file too short: %zu\n", + ar->hw_params.fw.dir, filename, len); + ret = -EINVAL; + goto err; + } + + if (memcmp(data, ATH10K_BOARD_MAGIC, magic_len)) { + ath10k_err(ar, "found invalid board magic\n"); + ret = -EINVAL; + goto err; + } + + /* magic is padded to 4 bytes */ + magic_len = ALIGN(magic_len, 4); + if (len < magic_len) { + ath10k_err(ar, "failed: %s/%s too small to contain board data, len: %zu\n", + ar->hw_params.fw.dir, filename, len); + ret = -EINVAL; + goto err; + } + + data += magic_len; + len -= magic_len; + + /* attempt to find boardname in the IE list */ + ret = ath10k_core_search_bd(ar, boardname, data, len); + + /* if we didn't find it and have a fallback name, try that */ + if (ret == -ENOENT && fallback_boardname1) + ret = ath10k_core_search_bd(ar, fallback_boardname1, data, len); + + if (ret == -ENOENT && fallback_boardname2) + ret = ath10k_core_search_bd(ar, fallback_boardname2, data, len); + + if (ret == -ENOENT) { + ath10k_err(ar, + "failed to fetch board data for %s from %s/%s\n", + boardname, ar->hw_params.fw.dir, filename); + ret = -ENODATA; + } + + if (ret) + goto err; + + return 0; + +err: + ath10k_core_free_board_files(ar); + return ret; +} + +static int ath10k_core_create_board_name(struct ath10k *ar, char *name, + size_t name_len, bool with_variant, + bool with_chip_id) +{ + /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ + char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = {}; + + if (with_variant && ar->id.bdf_ext[0] != '\0') + scnprintf(variant, sizeof(variant), ",variant=%s", + ar->id.bdf_ext); + + if (ar->id.bmi_ids_valid) { + scnprintf(name, name_len, + "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s", + ath10k_bus_str(ar->hif.bus), + ar->id.bmi_chip_id, + ar->id.bmi_board_id, variant); + goto out; + } + + if (ar->id.qmi_ids_valid) { + if (with_chip_id) + scnprintf(name, name_len, + "bus=%s,qmi-board-id=%x,qmi-chip-id=%x%s", + ath10k_bus_str(ar->hif.bus), + ar->id.qmi_board_id, ar->id.qmi_chip_id, + variant); + else + scnprintf(name, name_len, + "bus=%s,qmi-board-id=%x", + 
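/* [Editor's note -- illustrative sketch, not part of this patch.]
 * The board name above is built from the best identifier available: BMI
 * chip/board ids first, then QMI ids, with the PCI subsystem ids as the
 * last resort, each optionally suffixed with ",variant=<bdf_ext>".
 * A standalone model of the two most common branches:
 */
#include <stdbool.h>
#include <stdio.h>

struct dev_ids {
	bool bmi_valid;
	int bmi_chip_id, bmi_board_id;
	unsigned int vendor, device, subsys_vendor, subsys_device;
	const char *variant;		/* "" when no variant was found */
};

static void board_name(char *name, size_t len, const char *bus,
		       const struct dev_ids *id)
{
	char variant[64] = "";

	if (id->variant[0])
		snprintf(variant, sizeof(variant), ",variant=%s", id->variant);

	if (id->bmi_valid)
		snprintf(name, len, "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
			 bus, id->bmi_chip_id, id->bmi_board_id, variant);
	else	/* QMI branch elided; PCI ids are the final fallback */
		snprintf(name, len,
			 "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
			 bus, id->vendor, id->device, id->subsys_vendor,
			 id->subsys_device, variant);
}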
ath10k_bus_str(ar->hif.bus), + ar->id.qmi_board_id); + goto out; + } + + scnprintf(name, name_len, + "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s", + ath10k_bus_str(ar->hif.bus), + ar->id.vendor, ar->id.device, + ar->id.subsystem_vendor, ar->id.subsystem_device, variant); +out: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name); + + return 0; +} + +static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name, + size_t name_len) +{ + if (ar->id.bmi_ids_valid) { + scnprintf(name, name_len, + "bus=%s,bmi-chip-id=%d,bmi-eboard-id=%d", + ath10k_bus_str(ar->hif.bus), + ar->id.bmi_chip_id, + ar->id.bmi_eboard_id); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using eboard name '%s'\n", name); + return 0; + } + /* Fallback if returned board id is zero */ + return -1; +} + +int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type) +{ + char boardname[100], fallback_boardname1[100], fallback_boardname2[100]; int ret; - fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, - ar->hw_params.fw.board); - if (IS_ERR(fw)) { - ath10k_err("could not fetch board data fw file (%ld)\n", - PTR_ERR(fw)); - return PTR_ERR(fw); + if (bd_ie_type == ATH10K_BD_IE_BOARD) { + /* With variant and chip id */ + ret = ath10k_core_create_board_name(ar, boardname, + sizeof(boardname), true, + true); + if (ret) { + ath10k_err(ar, "failed to create board name: %d", ret); + return ret; + } + + /* Without variant and only chip-id */ + ret = ath10k_core_create_board_name(ar, fallback_boardname1, + sizeof(boardname), false, + true); + if (ret) { + ath10k_err(ar, "failed to create 1st fallback board name: %d", + ret); + return ret; + } + + /* Without variant and without chip-id */ + ret = ath10k_core_create_board_name(ar, fallback_boardname2, + sizeof(boardname), false, + false); + if (ret) { + ath10k_err(ar, "failed to create 2nd fallback board name: %d", + ret); + return ret; + } + } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { + ret = ath10k_core_create_eboard_name(ar, boardname, + sizeof(boardname)); + if (ret) { + ath10k_err(ar, "fallback to eboard.bin since board id 0"); + goto fallback; + } } - ret = ath10k_push_board_ext_data(ar, fw); + ar->bd_api = 2; + ret = ath10k_core_fetch_board_data_api_n(ar, boardname, + fallback_boardname1, + fallback_boardname2, + ATH10K_BOARD_API2_FILE); + if (!ret) + goto success; + +fallback: + ar->bd_api = 1; + ret = ath10k_core_fetch_board_data_api_1(ar, bd_ie_type); if (ret) { - ath10k_err("could not push board ext data (%d)\n", ret); + ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n", + ar->hw_params.fw.dir); + return ret; + } + +success: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api); + return 0; +} +EXPORT_SYMBOL(ath10k_core_fetch_board_file); + +static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar) +{ + u32 result, address; + u8 ext_board_id; + int ret; + + address = ar->hw_params.patch_load_addr; + + if (!ar->normal_mode_fw.fw_file.otp_data || + !ar->normal_mode_fw.fw_file.otp_len) { + ath10k_warn(ar, + "failed to retrieve extended board id due to otp binary missing\n"); + return -ENODATA; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot upload otp to 0x%x len %zd for ext board id\n", + address, ar->normal_mode_fw.fw_file.otp_len); + + ret = ath10k_bmi_fast_download(ar, address, + ar->normal_mode_fw.fw_file.otp_data, + ar->normal_mode_fw.fw_file.otp_len); + if (ret) { + ath10k_err(ar, "could not write otp for ext board id check: %d\n", + ret); + return ret; + } 
+ + ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EXT_BOARD_ID, &result); + if (ret) { + ath10k_err(ar, "could not execute otp for ext board id check: %d\n", + ret); + return ret; + } + + if (!result) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "ext board id does not exist in otp, ignore it\n"); + return -EOPNOTSUPP; + } + + ext_board_id = result & ATH10K_BMI_EBOARD_ID_STATUS_MASK; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot get otp ext board id result 0x%08x ext_board_id %d\n", + result, ext_board_id); + + ar->id.bmi_eboard_id = ext_board_id; + + return 0; +} + +static int ath10k_download_board_data(struct ath10k *ar, const void *data, + size_t data_len) +{ + u32 board_data_size = ar->hw_params.fw.board_size; + u32 eboard_data_size = ar->hw_params.fw.ext_board_size; + u32 board_address; + u32 ext_board_address; + int ret; + + ret = ath10k_push_board_ext_data(ar, data, data_len); + if (ret) { + ath10k_err(ar, "could not push board ext data (%d)\n", ret); goto exit; } - ret = ath10k_bmi_read32(ar, hi_board_data, &address); + ret = ath10k_bmi_read32(ar, hi_board_data, &board_address); if (ret) { - ath10k_err("could not read board data addr (%d)\n", ret); + ath10k_err(ar, "could not read board data addr (%d)\n", ret); goto exit; } - ret = ath10k_bmi_write_memory(ar, address, fw->data, - min_t(u32, board_data_size, fw->size)); + ret = ath10k_bmi_write_memory(ar, board_address, data, + min_t(u32, board_data_size, + data_len)); if (ret) { - ath10k_err("could not write board data (%d)\n", ret); + ath10k_err(ar, "could not write board data (%d)\n", ret); goto exit; } ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1); if (ret) { - ath10k_err("could not write board data bit (%d)\n", ret); + ath10k_err(ar, "could not write board data bit (%d)\n", ret); + goto exit; + } + + if (!ar->id.ext_bid_supported) + goto exit; + + /* Extended board data download */ + ret = ath10k_core_get_ext_board_id_from_otp(ar); + if (ret == -EOPNOTSUPP) { + /* Not fetching ext_board_data if ext board id is 0 */ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "otp returned ext board id 0\n"); + return 0; + } else if (ret) { + ath10k_err(ar, "failed to get extended board id: %d\n", ret); goto exit; } + ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD_EXT); + if (ret) + goto exit; + + if (ar->normal_mode_fw.ext_board_data) { + ext_board_address = board_address + EXT_BOARD_ADDRESS_OFFSET; + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot writing ext board data to addr 0x%x", + ext_board_address); + ret = ath10k_bmi_write_memory(ar, ext_board_address, + ar->normal_mode_fw.ext_board_data, + min_t(u32, eboard_data_size, data_len)); + if (ret) + ath10k_err(ar, "failed to write ext board data: %d\n", ret); + } + exit: - release_firmware(fw); return ret; } static int ath10k_download_and_run_otp(struct ath10k *ar) { - const struct firmware *fw; - u32 address; - u32 exec_param; + u32 result, address = ar->hw_params.patch_load_addr; + u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param; int ret; + ret = ath10k_download_board_data(ar, + ar->running_fw->board_data, + ar->running_fw->board_len); + if (ret) { + ath10k_err(ar, "failed to download board data: %d\n", ret); + return ret; + } + /* OTP is optional */ - if (ar->hw_params.fw.otp == NULL) { - ath10k_info("otp file not defined\n"); + if (!ar->running_fw->fw_file.otp_data || + !ar->running_fw->fw_file.otp_len) { + ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n", + ar->running_fw->fw_file.otp_data, + ar->running_fw->fw_file.otp_len); return 0; 
} - address = ar->hw_params.patch_load_addr; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", + address, ar->running_fw->fw_file.otp_len); - fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, - ar->hw_params.fw.otp); - if (IS_ERR(fw)) { - ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw)); - return 0; + ret = ath10k_bmi_fast_download(ar, address, + ar->running_fw->fw_file.otp_data, + ar->running_fw->fw_file.otp_len); + if (ret) { + ath10k_err(ar, "could not write otp (%d)\n", ret); + return ret; } - ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); + /* As of now pre-cal is valid for 10_4 variants */ + if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || + ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE || + ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM) + bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL; + + ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result); if (ret) { - ath10k_err("could not write otp (%d)\n", ret); - goto exit; + ath10k_err(ar, "could not execute otp (%d)\n", ret); + return ret; } - exec_param = 0; - ret = ath10k_bmi_execute(ar, address, &exec_param); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); + + if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, + ar->running_fw->fw_file.fw_features)) && + result != 0) { + ath10k_err(ar, "otp calibration failed: %d", result); + return -EINVAL; + } + + return 0; +} + +static int ath10k_download_cal_file(struct ath10k *ar, + const struct firmware *file) +{ + int ret; + + if (!file) + return -ENOENT; + + if (IS_ERR(file)) + return PTR_ERR(file); + + ret = ath10k_download_board_data(ar, file->data, file->size); if (ret) { - ath10k_err("could not execute otp (%d)\n", ret); - goto exit; + ath10k_err(ar, "failed to download cal_file data: %d\n", ret); + return ret; } -exit: - release_firmware(fw); - return ret; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n"); + + return 0; } -static int ath10k_download_fw(struct ath10k *ar) +static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name) { - const struct firmware *fw; - u32 address; + struct device_node *node; + int data_len; + void *data; int ret; - if (ar->hw_params.fw.fw == NULL) - return -EINVAL; + node = ar->dev->of_node; + if (!node) + /* Device Tree is optional, don't print any warnings if + * there's no node for ath10k. 
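/* [Editor's note -- illustrative sketch, not part of this patch.]
 * The OTP result check above fails the boot only when neither the skip_otp
 * module parameter nor the firmware's IGNORE_OTP_RESULT feature bit asks
 * for the result to be ignored. The guard, reduced to a predicate:
 */
#include <stdbool.h>

static bool otp_result_fatal(int result, bool skip_otp, bool fw_ignores_otp)
{
	/* result == 0 means the OTP calibration ran successfully */
	return !(skip_otp || fw_ignores_otp) && result != 0;
}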
+ */ + return -ENOENT; - address = ar->hw_params.patch_load_addr; + if (!of_get_property(node, dt_name, &data_len)) { + /* The calibration data node is optional */ + return -ENOENT; + } + + if (data_len != ar->hw_params.cal_data_len) { + ath10k_warn(ar, "invalid calibration data length in DT: %d\n", + data_len); + ret = -EMSGSIZE; + goto out; + } - fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, - ar->hw_params.fw.fw); - if (IS_ERR(fw)) { - ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw)); - return PTR_ERR(fw); + data = kmalloc(data_len, GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto out; } - ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); + ret = of_property_read_u8_array(node, dt_name, data, data_len); if (ret) { - ath10k_err("could not write fw (%d)\n", ret); - goto exit; + ath10k_warn(ar, "failed to read calibration data from DT: %d\n", + ret); + goto out_free; } -exit: - release_firmware(fw); + ret = ath10k_download_board_data(ar, data, data_len); + if (ret) { + ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n", + ret); + goto out_free; + } + + ret = 0; + +out_free: + kfree(data); + +out: return ret; } -static int ath10k_init_download_firmware(struct ath10k *ar) +static int ath10k_download_cal_eeprom(struct ath10k *ar) { + size_t data_len; + void *data = NULL; int ret; - ret = ath10k_download_board_data(ar); + ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len); + if (ret) { + if (ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = ath10k_download_board_data(ar, data, data_len); + if (ret) { + ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = 0; + +out_free: + kfree(data); + + return ret; +} + +static int ath10k_download_cal_nvmem(struct ath10k *ar, const char *cell_name) +{ + struct nvmem_cell *cell; + void *buf; + size_t len; + int ret; + + cell = devm_nvmem_cell_get(ar->dev, cell_name); + if (IS_ERR(cell)) { + ret = PTR_ERR(cell); + return ret; + } + + buf = nvmem_cell_read(cell, &len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + if (ar->hw_params.cal_data_len != len) { + kfree(buf); + ath10k_warn(ar, "invalid calibration data length in nvmem-cell '%s': %zu != %u\n", + cell_name, len, ar->hw_params.cal_data_len); + return -EMSGSIZE; + } + + ret = ath10k_download_board_data(ar, buf, len); + kfree(buf); if (ret) + ath10k_warn(ar, "failed to download calibration data from nvmem-cell '%s': %d\n", + cell_name, ret); + + return ret; +} + +int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, + struct ath10k_fw_file *fw_file) +{ + size_t magic_len, len, ie_len; + int ie_id, i, index, bit, ret; + struct ath10k_fw_ie *hdr; + const u8 *data; + __le32 *timestamp, *version; + + /* first fetch the firmware file (firmware-*.bin) */ + fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, + name); + if (IS_ERR(fw_file->firmware)) + return PTR_ERR(fw_file->firmware); + + data = fw_file->firmware->data; + len = fw_file->firmware->size; + + /* magic also includes the null byte, check that as well */ + magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; + + if (len < magic_len) { + ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n", + ar->hw_params.fw.dir, name, len); + ret = -EINVAL; + goto err; + } + + if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) { + ath10k_err(ar, "invalid firmware magic\n"); + ret = -EINVAL; + goto err; + } + + /* jump 
over the padding */ + magic_len = ALIGN(magic_len, 4); + + len -= magic_len; + data += magic_len; + + /* loop elements */ + while (len > sizeof(struct ath10k_fw_ie)) { + hdr = (struct ath10k_fw_ie *)data; + + ie_id = le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data += sizeof(*hdr); + + if (len < ie_len) { + ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n", + ie_id, len, ie_len); + ret = -EINVAL; + goto err; + } + + switch (ie_id) { + case ATH10K_FW_IE_FW_VERSION: + if (ie_len > sizeof(fw_file->fw_version) - 1) + break; + + memcpy(fw_file->fw_version, data, ie_len); + fw_file->fw_version[ie_len] = '\0'; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found fw version %s\n", + fw_file->fw_version); + break; + case ATH10K_FW_IE_TIMESTAMP: + if (ie_len != sizeof(u32)) + break; + + timestamp = (__le32 *)data; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n", + le32_to_cpup(timestamp)); + break; + case ATH10K_FW_IE_FEATURES: + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found firmware features ie (%zd B)\n", + ie_len); + + for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { + index = i / 8; + bit = i % 8; + + if (index == ie_len) + break; + + if (data[index] & (1 << bit)) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "Enabling feature bit: %i\n", + i); + __set_bit(i, fw_file->fw_features); + } + } + + ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "", + fw_file->fw_features, + sizeof(fw_file->fw_features)); + break; + case ATH10K_FW_IE_FW_IMAGE: + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found fw image ie (%zd B)\n", + ie_len); + + fw_file->firmware_data = data; + fw_file->firmware_len = ie_len; + + break; + case ATH10K_FW_IE_OTP_IMAGE: + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found otp image ie (%zd B)\n", + ie_len); + + fw_file->otp_data = data; + fw_file->otp_len = ie_len; + + break; + case ATH10K_FW_IE_WMI_OP_VERSION: + if (ie_len != sizeof(u32)) + break; + + version = (__le32 *)data; + + fw_file->wmi_op_version = le32_to_cpup(version); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n", + fw_file->wmi_op_version); + break; + case ATH10K_FW_IE_HTT_OP_VERSION: + if (ie_len != sizeof(u32)) + break; + + version = (__le32 *)data; + + fw_file->htt_op_version = le32_to_cpup(version); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n", + fw_file->htt_op_version); + break; + case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE: + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found fw code swap image ie (%zd B)\n", + ie_len); + fw_file->codeswap_data = data; + fw_file->codeswap_len = ie_len; + break; + default: + ath10k_warn(ar, "Unknown FW IE: %u\n", + le32_to_cpu(hdr->id)); + break; + } + + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + + len -= ie_len; + data += ie_len; + } + + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) && + (!fw_file->firmware_data || !fw_file->firmware_len)) { + ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", + ar->hw_params.fw.dir, name); + ret = -ENOMEDIUM; + goto err; + } + + return 0; + +err: + ath10k_core_free_firmware_files(ar); + return ret; +} + +static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name, + size_t fw_name_len, int fw_api) +{ + switch (ar->hif.bus) { + case ATH10K_BUS_SDIO: + case ATH10K_BUS_USB: + scnprintf(fw_name, fw_name_len, "%s-%s-%d.bin", + ATH10K_FW_FILE_BASE, ath10k_bus_str(ar->hif.bus), + fw_api); + break; + case ATH10K_BUS_PCI: + case ATH10K_BUS_AHB: + case ATH10K_BUS_SNOC: + scnprintf(fw_name, fw_name_len, "%s-%d.bin", + ATH10K_FW_FILE_BASE, 
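/* [Editor's note -- illustrative sketch, not part of this patch.]
 * The FEATURES IE parsed above is a byte array of feature bits: bit i
 * lives at byte i/8, position i%8, and the loop stops at the IE boundary
 * so shorter firmware images simply expose fewer bits. Standalone:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool fw_feature_set(const uint8_t *ie, size_t ie_len, unsigned int i)
{
	size_t index = i / 8;
	unsigned int bit = i % 8;

	if (index >= ie_len)
		return false;	/* bit is beyond what this IE carries */

	return ie[index] & (1u << bit);
}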
fw_api); + break; + } +} + +static int ath10k_core_fetch_firmware_files(struct ath10k *ar) +{ + int ret, i; + char fw_name[100]; + + /* calibration file is optional, don't check for any errors */ + ath10k_fetch_cal_file(ar); + + for (i = ATH10K_FW_API_MAX; i >= ATH10K_FW_API_MIN; i--) { + ar->fw_api = i; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", + ar->fw_api); + + ath10k_core_get_fw_name(ar, fw_name, sizeof(fw_name), ar->fw_api); + ret = ath10k_core_fetch_firmware_api_n(ar, fw_name, + &ar->normal_mode_fw.fw_file); + if (!ret) + goto success; + } + + /* we end up here if we couldn't fetch any firmware */ + + ath10k_err(ar, "Failed to find firmware-N.bin (N between %d and %d) from %s: %d", + ATH10K_FW_API_MIN, ATH10K_FW_API_MAX, ar->hw_params.fw.dir, + ret); + + return ret; + +success: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); + + return 0; +} + +static int ath10k_core_pre_cal_download(struct ath10k *ar) +{ + int ret; + + ret = ath10k_download_cal_nvmem(ar, "pre-calibration"); + if (ret == 0) { + ar->cal_mode = ATH10K_PRE_CAL_MODE_NVMEM; + goto success; + } else if (ret == -EPROBE_DEFER) { + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a pre-calibration nvmem-cell, try file next: %d\n", + ret); + + ret = ath10k_download_cal_file(ar, ar->pre_cal_file); + if (ret == 0) { + ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE; + goto success; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a pre calibration file, try DT next: %d\n", + ret); + + ret = ath10k_download_cal_dt(ar, "qcom,pre-calibration-data"); + if (ret == -ENOENT) + ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data"); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "unable to load pre cal data from DT: %d\n", ret); + return ret; + } + ar->cal_mode = ATH10K_PRE_CAL_MODE_DT; + +success: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n", + ath10k_cal_mode_str(ar->cal_mode)); + + return 0; +} + +static int ath10k_core_pre_cal_config(struct ath10k *ar) +{ + int ret; + + ret = ath10k_core_pre_cal_download(ar); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "failed to load pre cal data: %d\n", ret); + return ret; + } + + ret = ath10k_core_get_board_id_from_otp(ar); + if (ret) { + ath10k_err(ar, "failed to get board id: %d\n", ret); return ret; + } ret = ath10k_download_and_run_otp(ar); - if (ret) + if (ret) { + ath10k_err(ar, "failed to run otp: %d\n", ret); return ret; + } - ret = ath10k_download_fw(ar); - if (ret) + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "pre cal configuration done successfully\n"); + + return 0; +} + +static int ath10k_download_cal_data(struct ath10k *ar) +{ + int ret; + + ret = ath10k_core_pre_cal_config(ar); + if (ret == 0) + return 0; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "pre cal download procedure failed, try cal file: %d\n", + ret); + + ret = ath10k_download_cal_nvmem(ar, "calibration"); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_NVMEM; + goto done; + } else if (ret == -EPROBE_DEFER) { return ret; + } - return ret; + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a calibration nvmem-cell, try file next: %d\n", + ret); + + ret = ath10k_download_cal_file(ar, ar->cal_file); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_FILE; + goto done; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a calibration file, try DT next: %d\n", + ret); + + ret = ath10k_download_cal_dt(ar, "qcom,calibration-data"); + if (ret == -ENOENT) + ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data"); + 
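/* [Editor's note -- illustrative sketch, not part of this patch.]
 * Firmware images are probed newest-API-first: firmware-N.bin for N from
 * ATH10K_FW_API_MAX down to ATH10K_FW_API_MIN, stopping at the first file
 * that loads. The probe order, modeled with fopen() and placeholder
 * bounds:
 */
#include <stdio.h>

#define FW_API_MIN 2	/* placeholder bounds, not the driver constants */
#define FW_API_MAX 6

static int pick_fw_api(const char *dir)
{
	char name[100];
	int api;

	for (api = FW_API_MAX; api >= FW_API_MIN; api--) {
		snprintf(name, sizeof(name), "%s/firmware-%d.bin", dir, api);
		FILE *f = fopen(name, "rb");
		if (f) {
			fclose(f);
			return api;	/* newest available API wins */
		}
	}

	return -1;	/* no firmware image found at all */
}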
if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_DT; + goto done; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find DT entry, try target EEPROM next: %d\n", + ret); + + ret = ath10k_download_cal_eeprom(ar); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_EEPROM; + goto done; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find target EEPROM entry, try OTP next: %d\n", + ret); + + ret = ath10k_download_and_run_otp(ar); + if (ret) { + ath10k_err(ar, "failed to run otp: %d\n", ret); + return ret; + } + + ar->cal_mode = ATH10K_CAL_MODE_OTP; + +done: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n", + ath10k_cal_mode_str(ar->cal_mode)); + return 0; +} + +static void ath10k_core_fetch_btcoex_dt(struct ath10k *ar) +{ + struct device_node *node; + u8 coex_support = 0; + int ret; + + node = ar->dev->of_node; + if (!node) + goto out; + + ret = of_property_read_u8(node, "qcom,coexist-support", &coex_support); + if (ret) { + ar->coex_support = true; + goto out; + } + + if (coex_support) { + ar->coex_support = true; + } else { + ar->coex_support = false; + ar->coex_gpio_pin = -1; + goto out; + } + + ret = of_property_read_u32(node, "qcom,coexist-gpio-pin", + &ar->coex_gpio_pin); + if (ret) + ar->coex_gpio_pin = -1; + +out: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot coex_support %d coex_gpio_pin %d\n", + ar->coex_support, ar->coex_gpio_pin); } static int ath10k_init_uart(struct ath10k *ar) @@ -389,277 +2422,1363 @@ static int ath10k_init_uart(struct ath10k *ar) */ ret = ath10k_bmi_write32(ar, hi_serial_enable, 0); if (ret) { - ath10k_warn("could not disable UART prints (%d)\n", ret); + ath10k_warn(ar, "could not disable UART prints (%d)\n", ret); return ret; } if (!uart_print) { - ath10k_info("UART prints disabled\n"); + if (ar->hw_params.uart_pin_workaround) { + ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, + ar->hw_params.uart_pin); + if (ret) { + ath10k_warn(ar, "failed to set UART TX pin: %d", + ret); + return ret; + } + } + return 0; } - ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7); + ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin); if (ret) { - ath10k_warn("could not enable UART prints (%d)\n", ret); + ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); return ret; } ret = ath10k_bmi_write32(ar, hi_serial_enable, 1); if (ret) { - ath10k_warn("could not enable UART prints (%d)\n", ret); + ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); return ret; } - ath10k_info("UART prints enabled\n"); + /* Set the UART baud rate to 19200. 
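/* [Editor's note -- illustrative sketch, not part of this patch.]
 * ath10k_download_cal_data() above walks a strict fallback chain and
 * records which source finally provided the calibration data: pre-cal
 * sources first, then nvmem cell, cal file, Device Tree, target EEPROM
 * and finally OTP. The chain, reduced to a table-driven skeleton:
 */
enum cal_mode { CAL_NVMEM, CAL_FILE, CAL_DT, CAL_EEPROM, CAL_OTP };

struct cal_source {
	enum cal_mode mode;
	int (*download)(void *ctx);	/* returns 0 on success */
};

static int download_cal_data(const struct cal_source *srcs, int n,
			     void *ctx, enum cal_mode *out)
{
	int i, ret = -1;

	for (i = 0; i < n; i++) {
		ret = srcs[i].download(ctx);
		if (ret == 0) {
			*out = srcs[i].mode;	/* remember the winner */
			return 0;
		}
		/* the real driver propagates -EPROBE_DEFER immediately
		 * instead of falling through to the next source
		 */
	}

	return ret;
}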
*/ + ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200); + if (ret) { + ath10k_warn(ar, "could not set the baud rate (%d)\n", ret); + return ret; + } + + ath10k_info(ar, "UART prints enabled\n"); return 0; } static int ath10k_init_hw_params(struct ath10k *ar) { - const struct ath10k_hw_params *uninitialized_var(hw_params); + const struct ath10k_hw_params *hw_params; int i; for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { hw_params = &ath10k_hw_params_list[i]; - if (hw_params->id == ar->target_version) + if (hw_params->bus == ar->hif.bus && + hw_params->id == ar->target_version && + hw_params->dev_id == ar->dev_id) break; } if (i == ARRAY_SIZE(ath10k_hw_params_list)) { - ath10k_err("Unsupported hardware version: 0x%x\n", + ath10k_err(ar, "Unsupported hardware version: 0x%x\n", ar->target_version); return -EINVAL; } ar->hw_params = *hw_params; - ath10k_info("Hardware name %s version 0x%x\n", - ar->hw_params.name, ar->target_version); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n", + ar->hw_params.name, ar->target_version); return 0; } -struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, - enum ath10k_bus bus, - const struct ath10k_hif_ops *hif_ops) +static void ath10k_core_recovery_check_work(struct work_struct *work) { - struct ath10k *ar; + struct ath10k *ar = container_of(work, struct ath10k, recovery_check_work); + long time_left; - ar = ath10k_mac_create(); - if (!ar) - return NULL; + /* Sometimes the recovery will fail and then the next all recovery fail, + * so avoid infinite recovery. + */ + if (atomic_read(&ar->fail_cont_count) >= ATH10K_RECOVERY_MAX_FAIL_COUNT) { + ath10k_err(ar, "consecutive fail %d times, will shutdown driver!", + atomic_read(&ar->fail_cont_count)); + ar->state = ATH10K_STATE_WEDGED; + return; + } - ar->ath_common.priv = ar; - ar->ath_common.hw = ar->hw; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "total recovery count: %d", ++ar->recovery_count); + + if (atomic_read(&ar->pending_recovery)) { + /* Sometimes it happened another recovery work before the previous one + * completed, then the second recovery work will destroy the previous + * one, thus below is to avoid that. + */ + time_left = wait_for_completion_timeout(&ar->driver_recovery, + ATH10K_RECOVERY_TIMEOUT_HZ); + if (time_left) { + ath10k_warn(ar, "previous recovery succeeded, skip this!\n"); + return; + } + + /* Record the continuous recovery fail count when recovery failed. */ + atomic_inc(&ar->fail_cont_count); + + /* Avoid having multiple recoveries at the same time. 
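/* [Editor's note -- illustrative sketch, not part of this patch.]
 * The recovery worker above gives up after a fixed number of consecutive
 * failed restarts (wedging the device instead of looping forever) and
 * never runs two recoveries concurrently: a pending attempt is waited on,
 * and only counted against the failure budget if it did not complete.
 * The decision logic as a standalone state check (the cap value is a
 * placeholder for ATH10K_RECOVERY_MAX_FAIL_COUNT):
 */
#include <stdbool.h>

#define RECOVERY_MAX_FAIL 4	/* placeholder cap */

enum recovery_action { RECOVER_WEDGE, RECOVER_SKIP, RECOVER_RESTART };

static enum recovery_action recovery_decide(int *fail_cont_count,
					    bool recovery_pending,
					    bool previous_succeeded)
{
	if (*fail_cont_count >= RECOVERY_MAX_FAIL)
		return RECOVER_WEDGE;		/* stop trying, driver stuck */

	if (recovery_pending) {
		if (!previous_succeeded)
			(*fail_cont_count)++;	/* count the failed attempt */
		return RECOVER_SKIP;		/* one recovery at a time */
	}

	return RECOVER_RESTART;
}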
*/ + return; + } - ar->p2p = !!ath10k_p2p; - ar->dev = dev; + atomic_inc(&ar->pending_recovery); + queue_work(ar->workqueue, &ar->restart_work); +} - ar->hif.priv = hif_priv; - ar->hif.ops = hif_ops; - ar->hif.bus = bus; +void ath10k_core_start_recovery(struct ath10k *ar) +{ + /* Use workqueue_aux to avoid blocking recovery tracking */ + queue_work(ar->workqueue_aux, &ar->recovery_check_work); +} +EXPORT_SYMBOL(ath10k_core_start_recovery); - ar->free_vdev_map = 0xFF; /* 8 vdevs */ +void ath10k_core_napi_enable(struct ath10k *ar) +{ + lockdep_assert_held(&ar->conf_mutex); - init_completion(&ar->scan.started); - init_completion(&ar->scan.completed); - init_completion(&ar->scan.on_channel); + if (test_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags)) + return; - init_completion(&ar->install_key_done); - init_completion(&ar->vdev_setup_done); + napi_enable(&ar->napi); + set_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags); +} +EXPORT_SYMBOL(ath10k_core_napi_enable); - setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar); +void ath10k_core_napi_sync_disable(struct ath10k *ar) +{ + lockdep_assert_held(&ar->conf_mutex); - ar->workqueue = create_singlethread_workqueue("ath10k_wq"); - if (!ar->workqueue) - goto err_wq; + if (!test_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags)) + return; - mutex_init(&ar->conf_mutex); - spin_lock_init(&ar->data_lock); + napi_synchronize(&ar->napi); + napi_disable(&ar->napi); + clear_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags); +} +EXPORT_SYMBOL(ath10k_core_napi_sync_disable); - INIT_LIST_HEAD(&ar->peers); - init_waitqueue_head(&ar->peer_mapping_wq); +static void ath10k_core_restart(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, restart_work); + int ret; - init_completion(&ar->offchan_tx_completed); - INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); - skb_queue_head_init(&ar->offchan_tx_queue); + reinit_completion(&ar->driver_recovery); - init_waitqueue_head(&ar->event_queue); + set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); - return ar; + /* Place a barrier to make sure the compiler doesn't reorder + * CRASH_FLUSH and calling other functions. + */ + barrier(); + + ieee80211_stop_queues(ar->hw); + ath10k_drain_tx(ar); + complete(&ar->scan.started); + complete(&ar->scan.completed); + complete(&ar->scan.on_channel); + complete(&ar->offchan_tx_completed); + complete(&ar->install_key_done); + complete(&ar->vdev_setup_done); + complete(&ar->vdev_delete_done); + complete(&ar->thermal.wmi_sync); + complete(&ar->bss_survey_done); + wake_up(&ar->htt.empty_tx_wq); + wake_up(&ar->wmi.tx_credits_wq); + wake_up(&ar->peer_mapping_wq); + + /* TODO: We can have one instance of cancelling coverage_class_work by + * moving it to ath10k_halt(), so that both stop() and restart() would + * call that but it takes conf_mutex() and if we call cancel_work_sync() + * with conf_mutex it will deadlock. 
+ */ + cancel_work_sync(&ar->set_coverage_class_work); + + mutex_lock(&ar->conf_mutex); + + switch (ar->state) { + case ATH10K_STATE_ON: + ar->state = ATH10K_STATE_RESTARTING; + ath10k_halt(ar); + ath10k_scan_finish(ar); + ieee80211_restart_hw(ar->hw); + break; + case ATH10K_STATE_OFF: + /* this can happen if driver is being unloaded + * or if the crash happens during FW probing + */ + ath10k_warn(ar, "cannot restart a device that hasn't been started\n"); + break; + case ATH10K_STATE_RESTARTING: + /* hw restart might be requested from multiple places */ + break; + case ATH10K_STATE_RESTARTED: + ar->state = ATH10K_STATE_WEDGED; + fallthrough; + case ATH10K_STATE_WEDGED: + ath10k_warn(ar, "device is wedged, will not restart\n"); + break; + case ATH10K_STATE_UTF: + ath10k_warn(ar, "firmware restart in UTF mode not supported\n"); + break; + } -err_wq: - ath10k_mac_destroy(ar); - return NULL; + mutex_unlock(&ar->conf_mutex); + + ret = ath10k_coredump_submit(ar); + if (ret) + ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d", + ret); } -EXPORT_SYMBOL(ath10k_core_create); -void ath10k_core_destroy(struct ath10k *ar) +static void ath10k_core_set_coverage_class_work(struct work_struct *work) { - flush_workqueue(ar->workqueue); - destroy_workqueue(ar->workqueue); + struct ath10k *ar = container_of(work, struct ath10k, + set_coverage_class_work); - ath10k_mac_destroy(ar); + if (ar->hw_params.hw_ops->set_coverage_class) + ar->hw_params.hw_ops->set_coverage_class(ar, -1, -1); } -EXPORT_SYMBOL(ath10k_core_destroy); +static int ath10k_core_init_firmware_features(struct ath10k *ar) +{ + struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file; + int max_num_peers; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) && + !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) { + ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well"); + return -EINVAL; + } + + if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) { + ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n", + ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version); + return -EINVAL; + } + + ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI; + switch (ath10k_cryptmode_param) { + case ATH10K_CRYPT_MODE_HW: + clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); + clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags); + break; + case ATH10K_CRYPT_MODE_SW: + if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, + fw_file->fw_features)) { + ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware"); + return -EINVAL; + } + + set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); + set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags); + break; + default: + ath10k_info(ar, "invalid cryptmode: %d\n", + ath10k_cryptmode_param); + return -EINVAL; + } + + ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT; + ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT; + + if (ath10k_frame_mode == ATH10K_HW_TXRX_RAW) { + if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, + fw_file->fw_features)) { + ath10k_err(ar, "rawmode = 1 requires support from firmware"); + return -EINVAL; + } + set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); + } + + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW; + + /* Workaround: + * + * Firmware A-MSDU aggregation breaks with RAW Tx encap mode + * and causes enormous performance issues (malformed frames, + * etc). 
+ * + * Disabling A-MSDU makes RAW mode stable with heavy traffic + * albeit a bit slower compared to regular operation. + */ + ar->htt.max_num_amsdu = 1; + } + + /* Backwards compatibility for firmwares without + * ATH10K_FW_IE_WMI_OP_VERSION. + */ + if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, + fw_file->fw_features)) + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2; + else + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1; + } else { + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN; + } + } + + switch (fw_file->wmi_op_version) { + case ATH10K_FW_WMI_OP_VERSION_MAIN: + max_num_peers = TARGET_NUM_PEERS; + ar->max_num_stations = TARGET_NUM_STATIONS; + ar->max_num_vdevs = TARGET_NUM_VDEVS; + ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC; + ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV | + WMI_STAT_PEER; + ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; + break; + case ATH10K_FW_WMI_OP_VERSION_10_1: + case ATH10K_FW_WMI_OP_VERSION_10_2: + case ATH10K_FW_WMI_OP_VERSION_10_2_4: + if (ath10k_peer_stats_enabled(ar)) { + max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS; + ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS; + } else { + max_num_peers = TARGET_10X_NUM_PEERS; + ar->max_num_stations = TARGET_10X_NUM_STATIONS; + } + ar->max_num_vdevs = TARGET_10X_NUM_VDEVS; + ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; + ar->fw_stats_req_mask = WMI_STAT_PEER; + ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; + break; + case ATH10K_FW_WMI_OP_VERSION_TLV: + max_num_peers = TARGET_TLV_NUM_PEERS; + ar->max_num_stations = TARGET_TLV_NUM_STATIONS; + ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS; + ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS; + if (ar->hif.bus == ATH10K_BUS_SDIO) + ar->htt.max_num_pending_tx = + TARGET_TLV_NUM_MSDU_DESC_HL; + else + ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; + ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS; + ar->fw_stats_req_mask = WMI_TLV_STAT_PDEV | WMI_TLV_STAT_VDEV | + WMI_TLV_STAT_PEER | WMI_TLV_STAT_PEER_EXTD; + ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; + ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC; + break; + case ATH10K_FW_WMI_OP_VERSION_10_4: + max_num_peers = TARGET_10_4_NUM_PEERS; + ar->max_num_stations = TARGET_10_4_NUM_STATIONS; + ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; + ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; + ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; + ar->fw_stats_req_mask = WMI_10_4_STAT_PEER | + WMI_10_4_STAT_PEER_EXTD | + WMI_10_4_STAT_VDEV_EXTD; + ar->max_spatial_stream = ar->hw_params.max_spatial_stream; + ar->max_num_tdls_vdevs = TARGET_10_4_NUM_TDLS_VDEVS; + + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + fw_file->fw_features)) + ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC; + else + ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC; + break; + case ATH10K_FW_WMI_OP_VERSION_UNSET: + case ATH10K_FW_WMI_OP_VERSION_MAX: + default: + WARN_ON(1); + return -EINVAL; + } + + if (ar->hw_params.num_peers) + ar->max_num_peers = ar->hw_params.num_peers; + else + ar->max_num_peers = max_num_peers; + + /* Backwards compatibility for firmwares without + * ATH10K_FW_IE_HTT_OP_VERSION. 
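/* [Editor's note -- illustrative sketch, not part of this patch.]
 * The switch above derives every resource limit (peers, stations, vdevs,
 * pending tx descriptors) from the firmware's WMI op version. A compact
 * model of that mapping; the numbers are placeholders, not the TARGET_*
 * constants:
 */
struct fw_limits {
	int max_peers, max_stations, max_vdevs;
};

enum wmi_op { WMI_OP_MAIN, WMI_OP_10X, WMI_OP_TLV, WMI_OP_10_4 };

static struct fw_limits limits_for(enum wmi_op op)
{
	switch (op) {
	case WMI_OP_MAIN: return (struct fw_limits){  16,  16,  8 };
	case WMI_OP_10X:  return (struct fw_limits){ 128, 128, 16 };
	case WMI_OP_TLV:  return (struct fw_limits){  32,  32,  8 };
	case WMI_OP_10_4: return (struct fw_limits){ 512, 512, 64 };
	}
	return (struct fw_limits){ 0, 0, 0 };	/* unreachable */
}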
+ */ + if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) { + switch (fw_file->wmi_op_version) { + case ATH10K_FW_WMI_OP_VERSION_MAIN: + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN; + break; + case ATH10K_FW_WMI_OP_VERSION_10_1: + case ATH10K_FW_WMI_OP_VERSION_10_2: + case ATH10K_FW_WMI_OP_VERSION_10_2_4: + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1; + break; + case ATH10K_FW_WMI_OP_VERSION_TLV: + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV; + break; + case ATH10K_FW_WMI_OP_VERSION_10_4: + case ATH10K_FW_WMI_OP_VERSION_UNSET: + case ATH10K_FW_WMI_OP_VERSION_MAX: + ath10k_err(ar, "htt op version not found from fw meta data"); + return -EINVAL; + } + } + + return 0; +} + +static int ath10k_core_reset_rx_filter(struct ath10k *ar) +{ + int ret; + int vdev_id; + int vdev_type; + int vdev_subtype; + const u8 *vdev_addr; + + vdev_id = 0; + vdev_type = WMI_VDEV_TYPE_STA; + vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); + vdev_addr = ar->mac_addr; + + ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype, + vdev_addr); + if (ret) { + ath10k_err(ar, "failed to create dummy vdev: %d\n", ret); + return ret; + } + + ret = ath10k_wmi_vdev_delete(ar, vdev_id); + if (ret) { + ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret); + return ret; + } + + /* WMI and HTT may use separate HIF pipes and are not guaranteed to be + * serialized properly implicitly. + * + * Moreover (most) WMI commands have no explicit acknowledges. It is + * possible to infer it implicitly by poking firmware with echo + * command - getting a reply means all preceding comments have been + * (mostly) processed. + * + * In case of vdev create/delete this is sufficient. + * + * Without this it's possible to end up with a race when HTT Rx ring is + * started before vdev create/delete hack is complete allowing a short + * window of opportunity to receive (and Tx ACK) a bunch of frames. 
+ */ + ret = ath10k_wmi_barrier(ar); + if (ret) { + ath10k_err(ar, "failed to ping firmware: %d\n", ret); + return ret; + } + + return 0; +} -int ath10k_core_register(struct ath10k *ar) +static int ath10k_core_compat_services(struct ath10k *ar) { - struct ath10k_htc_ops htc_ops; - struct bmi_target_info target_info; - int status; + struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file; - memset(&target_info, 0, sizeof(target_info)); - status = ath10k_bmi_get_target_info(ar, &target_info); - if (status) - goto err; + /* all 10.x firmware versions support thermal throttling but don't + * advertise the support via service flags so we have to hardcode + * it here + */ + switch (fw_file->wmi_op_version) { + case ATH10K_FW_WMI_OP_VERSION_10_1: + case ATH10K_FW_WMI_OP_VERSION_10_2: + case ATH10K_FW_WMI_OP_VERSION_10_2_4: + case ATH10K_FW_WMI_OP_VERSION_10_4: + set_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map); + break; + default: + break; + } - ar->target_version = target_info.version; - ar->hw->wiphy->hw_version = target_info.version; + return 0; +} - status = ath10k_init_hw_params(ar); - if (status) - goto err; +#define TGT_IRAM_READ_PER_ITR (8 * 1024) - if (ath10k_init_configure_target(ar)) { - status = -EINVAL; - goto err; +static int ath10k_core_copy_target_iram(struct ath10k *ar) +{ + const struct ath10k_hw_mem_layout *hw_mem; + const struct ath10k_mem_region *tmp, *mem_region = NULL; + dma_addr_t paddr; + void *vaddr = NULL; + u8 num_read_itr; + int i, ret; + u32 len, remaining_len; + + /* copy target iram feature must work also when + * ATH10K_FW_CRASH_DUMP_RAM_DATA is disabled, so + * _ath10k_coredump_get_mem_layout() to accomplist that + */ + hw_mem = _ath10k_coredump_get_mem_layout(ar); + if (!hw_mem) + /* if CONFIG_DEV_COREDUMP is disabled we get NULL, then + * just silently disable the feature by doing nothing + */ + return 0; + + for (i = 0; i < hw_mem->region_table.size; i++) { + tmp = &hw_mem->region_table.regions[i]; + if (tmp->type == ATH10K_MEM_REGION_TYPE_REG) { + mem_region = tmp; + break; + } } - status = ath10k_init_download_firmware(ar); - if (status) - goto err; + if (!mem_region) + return -ENOMEM; - status = ath10k_init_uart(ar); - if (status) - goto err; + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + if (ar->wmi.mem_chunks[i].req_id == + WMI_IRAM_RECOVERY_HOST_MEM_REQ_ID) { + vaddr = ar->wmi.mem_chunks[i].vaddr; + len = ar->wmi.mem_chunks[i].len; + break; + } + } - htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete; + if (!vaddr || !len) { + ath10k_warn(ar, "No allocated memory for IRAM back up"); + return -ENOMEM; + } - ar->htc = ath10k_htc_create(ar, &htc_ops); - if (IS_ERR(ar->htc)) { - status = PTR_ERR(ar->htc); - ath10k_err("could not create HTC (%d)\n", status); + len = (len < mem_region->len) ? 
len : mem_region->len; + paddr = mem_region->start; + num_read_itr = len / TGT_IRAM_READ_PER_ITR; + remaining_len = len % TGT_IRAM_READ_PER_ITR; + for (i = 0; i < num_read_itr; i++) { + ret = ath10k_hif_diag_read(ar, paddr, vaddr, + TGT_IRAM_READ_PER_ITR); + if (ret) { + ath10k_warn(ar, "failed to copy firmware IRAM contents: %d", + ret); + return ret; + } + + paddr += TGT_IRAM_READ_PER_ITR; + vaddr += TGT_IRAM_READ_PER_ITR; + } + + if (remaining_len) { + ret = ath10k_hif_diag_read(ar, paddr, vaddr, remaining_len); + if (ret) { + ath10k_warn(ar, "failed to copy firmware IRAM contents: %d", + ret); + return ret; + } + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "target IRAM back up completed\n"); + + return 0; +} + +int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, + const struct ath10k_fw_components *fw) +{ + int status; + u32 val; + + lockdep_assert_held(&ar->conf_mutex); + + clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); + + ar->running_fw = fw; + + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->running_fw->fw_file.fw_features)) { + ath10k_bmi_start(ar); + + /* Enable hardware clock to speed up firmware download */ + if (ar->hw_params.hw_ops->enable_pll_clk) { + status = ar->hw_params.hw_ops->enable_pll_clk(ar); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot enable pll ret %d\n", + status); + } + + if (ath10k_init_configure_target(ar)) { + status = -EINVAL; + goto err; + } + + status = ath10k_download_cal_data(ar); + if (status) + goto err; + + /* Some of qca988x solutions are having global reset issue + * during target initialization. Bypassing PLL setting before + * downloading firmware and letting the SoC run on REF_CLK is + * fixing the problem. Corresponding firmware change is also + * needed to set the clock source once the target is + * initialized. 
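/* [Editor's note -- illustrative sketch, not part of this patch.]
 * The IRAM backup above copies in fixed 8 KiB diag reads plus one tail
 * read for the remainder, advancing the target address and the host
 * buffer each iteration. The chunking arithmetic, standalone (memcpy()
 * stands in for ath10k_hif_diag_read()):
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define CHUNK (8 * 1024)	/* mirrors TGT_IRAM_READ_PER_ITR */

static void chunked_copy(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t full = len / CHUNK, tail = len % CHUNK, i;

	for (i = 0; i < full; i++) {
		memcpy(dst, src, CHUNK);	/* one diag read per chunk */
		dst += CHUNK;
		src += CHUNK;
	}

	if (tail)
		memcpy(dst, src, tail);		/* remainder, if any */
}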
+ */ + if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT, + ar->running_fw->fw_file.fw_features)) { + status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1); + if (status) { + ath10k_err(ar, "could not write to skip_clock_init: %d\n", + status); + goto err; + } + } + + status = ath10k_download_fw(ar); + if (status) + goto err; + + status = ath10k_init_uart(ar); + if (status) + goto err; + + if (ar->hif.bus == ATH10K_BUS_SDIO) { + status = ath10k_init_sdio(ar, mode); + if (status) { + ath10k_err(ar, "failed to init SDIO: %d\n", status); + goto err; + } + } + } + + ar->htc.htc_ops.target_send_suspend_complete = + ath10k_send_suspend_complete; + + status = ath10k_htc_init(ar); + if (status) { + ath10k_err(ar, "could not init HTC (%d)\n", status); goto err; } - status = ath10k_bmi_done(ar); - if (status) - goto err_htc_destroy; + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->running_fw->fw_file.fw_features)) { + status = ath10k_bmi_done(ar); + if (status) + goto err; + } status = ath10k_wmi_attach(ar); if (status) { - ath10k_err("WMI attach failed: %d\n", status); - goto err_htc_destroy; + ath10k_err(ar, "WMI attach failed: %d\n", status); + goto err; } - status = ath10k_htc_wait_target(ar->htc); - if (status) + status = ath10k_htt_init(ar); + if (status) { + ath10k_err(ar, "failed to init htt: %d\n", status); goto err_wmi_detach; + } - ar->htt = ath10k_htt_attach(ar); - if (!ar->htt) { - status = -ENOMEM; + status = ath10k_htt_tx_start(&ar->htt); + if (status) { + ath10k_err(ar, "failed to alloc htt tx: %d\n", status); goto err_wmi_detach; } - status = ath10k_init_connect_htc(ar); - if (status) - goto err_htt_detach; + /* If firmware indicates Full Rx Reorder support it must be used in a + * slightly different manner. Let HTT code know. + */ + ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER, + ar->wmi.svc_map)); - ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version); + status = ath10k_htt_rx_alloc(&ar->htt); + if (status) { + ath10k_err(ar, "failed to alloc htt rx: %d\n", status); + goto err_htt_tx_detach; + } - status = ath10k_check_fw_version(ar); - if (status) - goto err_disconnect_htc; + status = ath10k_hif_start(ar); + if (status) { + ath10k_err(ar, "could not start HIF: %d\n", status); + goto err_htt_rx_detach; + } + + status = ath10k_htc_wait_target(&ar->htc); + if (status) { + ath10k_err(ar, "failed to connect to HTC: %d\n", status); + goto err_hif_stop; + } + + status = ath10k_hif_start_post(ar); + if (status) { + ath10k_err(ar, "failed to swap mailbox: %d\n", status); + goto err_hif_stop; + } + + if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { + status = ath10k_htt_connect(&ar->htt); + if (status) { + ath10k_err(ar, "failed to connect htt (%d)\n", status); + goto err_hif_stop; + } + } + + status = ath10k_wmi_connect(ar); + if (status) { + ath10k_err(ar, "could not connect wmi: %d\n", status); + goto err_hif_stop; + } + + status = ath10k_htc_start(&ar->htc); + if (status) { + ath10k_err(ar, "failed to start htc: %d\n", status); + goto err_hif_stop; + } + + if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { + status = ath10k_wmi_wait_for_service_ready(ar); + if (status) { + ath10k_warn(ar, "wmi service ready event not received"); + goto err_hif_stop; + } + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", + ar->hw->wiphy->fw_version); + + if (test_bit(ATH10K_FW_FEATURE_IRAM_RECOVERY, + ar->running_fw->fw_file.fw_features)) { + status = ath10k_core_copy_target_iram(ar); + if (status) { + ath10k_warn(ar, "failed to copy target iram contents: %d", + 
status); + goto err_hif_stop; + } + } + + if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) && + mode == ATH10K_FIRMWARE_MODE_NORMAL) { + val = 0; + if (ath10k_peer_stats_enabled(ar)) + val = WMI_10_4_PEER_STATS; + + /* Enable vdev stats by default */ + val |= WMI_10_4_VDEV_STATS; + + if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) + val |= WMI_10_4_BSS_CHANNEL_INFO_64; + + ath10k_core_fetch_btcoex_dt(ar); + + /* 10.4 firmware supports BT-Coex without reloading firmware + * via pdev param. To support the Bluetooth coexistence pdev + * param, WMI_COEX_GPIO_SUPPORT of the extended resource config + * should always be enabled. + * + * We can still enable BTCOEX if the firmware has the support, + * even though the btcoex_support value is + * ATH10K_DT_BTCOEX_NOT_FOUND. + */ + + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features) && + ar->coex_support) + val |= WMI_10_4_COEX_GPIO_SUPPORT; + + if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, + ar->wmi.svc_map)) + val |= WMI_10_4_TDLS_EXPLICIT_MODE_ONLY; + + if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, + ar->wmi.svc_map)) + val |= WMI_10_4_TDLS_UAPSD_BUFFER_STA; + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, + ar->wmi.svc_map)) + val |= WMI_10_4_TX_DATA_ACK_RSSI; + + if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) + val |= WMI_10_4_REPORT_AIRTIME; + + if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, + ar->wmi.svc_map)) + val |= WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT; + + status = ath10k_mac_ext_resource_config(ar, val); + if (status) { + ath10k_err(ar, + "failed to send ext resource cfg command: %d\n", + status); + goto err_hif_stop; + } + } status = ath10k_wmi_cmd_init(ar); if (status) { - ath10k_err("could not send WMI init command (%d)\n", status); - goto err_disconnect_htc; + ath10k_err(ar, "could not send WMI init command (%d)\n", + status); + goto err_hif_stop; } status = ath10k_wmi_wait_for_unified_ready(ar); - if (status <= 0) { - ath10k_err("wmi unified ready event not received\n"); - status = -ETIMEDOUT; - goto err_disconnect_htc; + if (status) { + ath10k_err(ar, "wmi unified ready event not received\n"); + goto err_hif_stop; + } + + status = ath10k_core_compat_services(ar); + if (status) { + ath10k_err(ar, "compat services failed: %d\n", status); + goto err_hif_stop; + } + + status = ath10k_wmi_pdev_set_base_macaddr(ar, ar->mac_addr); + if (status && status != -EOPNOTSUPP) { + ath10k_err(ar, + "failed to set base mac address: %d\n", status); + goto err_hif_stop; + } + + /* Some firmware revisions do not properly set up hardware rx filter + * registers. + * + * A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK + * is filled with 0s instead of 1s allowing HW to respond with ACKs to + * any frame that matches MAC_PCU_RX_FILTER, which is also + * misconfigured to accept anything. + * + * The ADDR1 is programmed using an internal firmware structure field + * and can't be (easily/sanely) reached from the driver explicitly. It + * is possible to implicitly make it correct by creating a dummy vdev + * and then deleting it.
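+ * + * A rough sketch of that implicit fix-up (editorial illustration only, + * assuming the WMI vdev helpers used elsewhere in this driver; the real + * helper, ath10k_core_reset_rx_filter(), is what gets called below): + * + * ret = ath10k_wmi_vdev_create(ar, 0, WMI_VDEV_TYPE_STA, + * WMI_VDEV_SUBTYPE_NONE, ar->mac_addr); + * if (!ret) + * ath10k_wmi_vdev_delete(ar, 0);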
+ */ + if (ar->hw_params.hw_filter_reset_required && + mode == ATH10K_FIRMWARE_MODE_NORMAL) { + status = ath10k_core_reset_rx_filter(ar); + if (status) { + ath10k_err(ar, + "failed to reset rx filter: %d\n", status); + goto err_hif_stop; + } + } + + status = ath10k_htt_rx_ring_refill(ar); + if (status) { + ath10k_err(ar, "failed to refill htt rx ring: %d\n", status); + goto err_hif_stop; + } + + if (ar->max_num_vdevs >= 64) + ar->free_vdev_map = 0xFFFFFFFFFFFFFFFFLL; + else + ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1; + + INIT_LIST_HEAD(&ar->arvifs); + + /* we don't care about HTT in UTF mode */ + if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { + status = ath10k_htt_setup(&ar->htt); + if (status) { + ath10k_err(ar, "failed to setup htt: %d\n", status); + goto err_hif_stop; + } } - status = ath10k_htt_attach_target(ar->htt); + status = ath10k_debug_start(ar); if (status) - goto err_disconnect_htc; + goto err_hif_stop; - status = ath10k_mac_register(ar); + status = ath10k_hif_set_target_log_mode(ar, fw_diag_log); + if (status && status != -EOPNOTSUPP) { + ath10k_warn(ar, "set target log mode failed: %d\n", status); + goto err_hif_stop; + } + + status = ath10k_leds_start(ar); if (status) - goto err_disconnect_htc; + goto err_hif_stop; + + return 0; + +err_hif_stop: + ath10k_hif_stop(ar); +err_htt_rx_detach: + ath10k_htt_rx_free(&ar->htt); +err_htt_tx_detach: + ath10k_htt_tx_free(&ar->htt); +err_wmi_detach: + ath10k_wmi_detach(ar); +err: + return status; +} +EXPORT_SYMBOL(ath10k_core_start); + +int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt) +{ + int ret; + unsigned long time_left; + + reinit_completion(&ar->target_suspend); + + ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt); + if (ret) { + ath10k_warn(ar, "could not suspend target (%d)\n", ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ); + + if (!time_left) { + ath10k_warn(ar, "suspend timed out - target pause event never came\n"); + return -ETIMEDOUT; + } + + return 0; +} + +void ath10k_core_stop(struct ath10k *ar) +{ + lockdep_assert_held(&ar->conf_mutex); + ath10k_debug_stop(ar); + + /* try to suspend target */ + if (ar->state != ATH10K_STATE_RESTARTING && + ar->state != ATH10K_STATE_UTF) + ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR); + + ath10k_hif_stop(ar); + ath10k_htt_tx_stop(&ar->htt); + ath10k_htt_rx_free(&ar->htt); + ath10k_wmi_detach(ar); + + ar->id.bmi_ids_valid = false; +} +EXPORT_SYMBOL(ath10k_core_stop); + +/* mac80211 manages fw/hw initialization through start/stop hooks. 
However in + * order to know what hw capabilities should be advertised to mac80211 it is + * necessary to load the firmware (and tear it down immediately since start + * hook will try to init it again) before registering + */ +static int ath10k_core_probe_fw(struct ath10k *ar) +{ + struct bmi_target_info target_info = {}; + int ret = 0; + + ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL); + if (ret) { + ath10k_err(ar, "could not power on hif bus (%d)\n", ret); + return ret; + } + + switch (ar->hif.bus) { + case ATH10K_BUS_SDIO: + ret = ath10k_bmi_get_target_info_sdio(ar, &target_info); + if (ret) { + ath10k_err(ar, "could not get target info (%d)\n", ret); + goto err_power_down; + } + ar->target_version = target_info.version; + ar->hw->wiphy->hw_version = target_info.version; + break; + case ATH10K_BUS_PCI: + case ATH10K_BUS_AHB: + case ATH10K_BUS_USB: + ret = ath10k_bmi_get_target_info(ar, &target_info); + if (ret) { + ath10k_err(ar, "could not get target info (%d)\n", ret); + goto err_power_down; + } + ar->target_version = target_info.version; + ar->hw->wiphy->hw_version = target_info.version; + break; + case ATH10K_BUS_SNOC: + ret = ath10k_hif_get_target_info(ar, &target_info); + if (ret) { + ath10k_err(ar, "could not get target info (%d)\n", ret); + goto err_power_down; + } + ar->target_version = target_info.version; + ar->hw->wiphy->hw_version = target_info.version; + break; + default: + ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus); + } + + ret = ath10k_init_hw_params(ar); + if (ret) { + ath10k_err(ar, "could not get hw params (%d)\n", ret); + goto err_power_down; + } + + ret = ath10k_core_fetch_firmware_files(ar); + if (ret) { + ath10k_err(ar, "could not fetch firmware files (%d)\n", ret); + goto err_power_down; + } + + BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) != + sizeof(ar->normal_mode_fw.fw_file.fw_version)); + memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version, + sizeof(ar->hw->wiphy->fw_version)); + + ath10k_debug_print_hwfw_info(ar); + + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { + ret = ath10k_core_pre_cal_download(ar); + if (ret) { + /* pre calibration data download is not necessary + * for all the chipsets. Ignore failures and continue. 
+ */ + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "could not load pre cal data: %d\n", ret); + } + + ret = ath10k_core_get_board_id_from_otp(ar); + if (ret && ret != -EOPNOTSUPP) { + ath10k_err(ar, "failed to get board id from otp: %d\n", + ret); + goto err_free_firmware_files; + } + + ret = ath10k_core_check_smbios(ar); + if (ret) + ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n"); + + ret = ath10k_core_check_dt(ar); + if (ret) + ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n"); + + ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD); + if (ret) { + ath10k_err(ar, "failed to fetch board file: %d\n", ret); + goto err_free_firmware_files; + } + + ath10k_debug_print_board_info(ar); + } + + device_get_mac_address(ar->dev, ar->mac_addr); + + ret = ath10k_core_init_firmware_features(ar); + if (ret) { + ath10k_err(ar, "fatal problem with firmware features: %d\n", + ret); + goto err_free_firmware_files; + } + + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { + ret = ath10k_swap_code_seg_init(ar, + &ar->normal_mode_fw.fw_file); + if (ret) { + ath10k_err(ar, "failed to initialize code swap segment: %d\n", + ret); + goto err_free_firmware_files; + } + } + + mutex_lock(&ar->conf_mutex); + + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, + &ar->normal_mode_fw); + if (ret) { + ath10k_err(ar, "could not init core (%d)\n", ret); + goto err_unlock; + } + + ath10k_debug_print_boot_info(ar); + ath10k_core_stop(ar); + + mutex_unlock(&ar->conf_mutex); - status = ath10k_debug_create(ar); + ath10k_hif_power_down(ar); + return 0; + +err_unlock: + mutex_unlock(&ar->conf_mutex); + +err_free_firmware_files: + ath10k_core_free_firmware_files(ar); + +err_power_down: + ath10k_hif_power_down(ar); + + return ret; +} + +static void ath10k_core_register_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, register_work); + int status; + + /* peer stats are enabled by default */ + set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); + + status = ath10k_core_probe_fw(ar); + if (status) { + ath10k_err(ar, "could not probe fw (%d)\n", status); + goto err; + } + + status = ath10k_mac_register(ar); if (status) { - ath10k_err("unable to initialize debugfs\n"); + ath10k_err(ar, "could not register to mac80211 (%d)\n", status); + goto err_release_fw; + } + + status = ath10k_coredump_register(ar); + if (status) { + ath10k_err(ar, "unable to register coredump\n"); goto err_unregister_mac; } - return 0; + status = ath10k_debug_register(ar); + if (status) { + ath10k_err(ar, "unable to initialize debugfs\n"); + goto err_unregister_coredump; + } + status = ath10k_spectral_create(ar); + if (status) { + ath10k_err(ar, "failed to initialize spectral\n"); + goto err_debug_destroy; + } + + status = ath10k_thermal_register(ar); + if (status) { + ath10k_err(ar, "could not register thermal device: %d\n", + status); + goto err_spectral_destroy; + } + + status = ath10k_leds_register(ar); + if (status) { + ath10k_err(ar, "could not register leds: %d\n", + status); + goto err_thermal_unregister; + } + + set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags); + return; + +err_thermal_unregister: + ath10k_thermal_unregister(ar); +err_spectral_destroy: + ath10k_spectral_destroy(ar); +err_debug_destroy: + ath10k_debug_destroy(ar); +err_unregister_coredump: + ath10k_coredump_unregister(ar); err_unregister_mac: ath10k_mac_unregister(ar); -err_disconnect_htc: - ath10k_htc_stop(ar->htc); -err_htt_detach: - ath10k_htt_detach(ar->htt); 
-err_wmi_detach: - ath10k_wmi_detach(ar); -err_htc_destroy: - ath10k_htc_destroy(ar->htc); +err_release_fw: + ath10k_core_free_firmware_files(ar); err: - return status; + /* TODO: It's probably a good idea to release the device from the + * driver but calling device_release_driver() here will cause a + * deadlock. + */ + return; +} + +int ath10k_core_register(struct ath10k *ar, + const struct ath10k_bus_params *bus_params) +{ + ar->bus_param = *bus_params; + + queue_work(ar->workqueue, &ar->register_work); + + return 0; } EXPORT_SYMBOL(ath10k_core_register); void ath10k_core_unregister(struct ath10k *ar) { + cancel_work_sync(&ar->register_work); + + if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) + return; + + ath10k_leds_unregister(ar); + + ath10k_thermal_unregister(ar); + /* Stop spectral before unregistering from mac80211 to remove the + * relayfs debugfs file cleanly. Otherwise the parent debugfs tree + * would already be freed recursively, leading to a double free. + */ + ath10k_spectral_destroy(ar); + /* We must unregister from mac80211 before we stop HTC and HIF. * Otherwise we will fail to submit commands to FW and mac80211 will be - * unhappy about callback failures. */ + * unhappy about callback failures. + */ ath10k_mac_unregister(ar); - ath10k_htc_stop(ar->htc); - ath10k_htt_detach(ar->htt); - ath10k_wmi_detach(ar); - ath10k_htc_destroy(ar->htc); + + ath10k_testmode_destroy(ar); + + ath10k_core_free_firmware_files(ar); + ath10k_core_free_board_files(ar); + + ath10k_debug_unregister(ar); } EXPORT_SYMBOL(ath10k_core_unregister); -int ath10k_core_target_suspend(struct ath10k *ar) +struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, + enum ath10k_bus bus, + enum ath10k_hw_rev hw_rev, + const struct ath10k_hif_ops *hif_ops) { + struct ath10k *ar; int ret; - ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__); + ar = ath10k_mac_create(priv_size); + if (!ar) + return NULL; + + ar->ath_common.priv = ar; + ar->ath_common.hw = ar->hw; + ar->dev = dev; + ar->hw_rev = hw_rev; + ar->hif.ops = hif_ops; + ar->hif.bus = bus; + + switch (hw_rev) { + case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: + ar->regs = &qca988x_regs; + ar->hw_ce_regs = &qcax_ce_regs; + ar->hw_values = &qca988x_values; + break; + case ATH10K_HW_QCA6174: + case ATH10K_HW_QCA9377: + ar->regs = &qca6174_regs; + ar->hw_ce_regs = &qcax_ce_regs; + ar->hw_values = &qca6174_values; + break; + case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: + ar->regs = &qca99x0_regs; + ar->hw_ce_regs = &qcax_ce_regs; + ar->hw_values = &qca99x0_values; + break; + case ATH10K_HW_QCA9888: + ar->regs = &qca99x0_regs; + ar->hw_ce_regs = &qcax_ce_regs; + ar->hw_values = &qca9888_values; + break; + case ATH10K_HW_QCA4019: + ar->regs = &qca4019_regs; + ar->hw_ce_regs = &qcax_ce_regs; + ar->hw_values = &qca4019_values; + break; + case ATH10K_HW_WCN3990: + ar->regs = &wcn3990_regs; + ar->hw_ce_regs = &wcn3990_ce_regs; + ar->hw_values = &wcn3990_values; + break; + default: + ath10k_err(ar, "unsupported core hardware revision %d\n", + hw_rev); + ret = -EOPNOTSUPP; + goto err_free_mac; + } + + init_completion(&ar->scan.started); + init_completion(&ar->scan.completed); + init_completion(&ar->scan.on_channel); + init_completion(&ar->target_suspend); + init_completion(&ar->driver_recovery); + init_completion(&ar->wow.wakeup_completed); - ret = ath10k_wmi_pdev_suspend_target(ar); + init_completion(&ar->install_key_done); + init_completion(&ar->vdev_setup_done); + init_completion(&ar->vdev_delete_done); +
init_completion(&ar->thermal.wmi_sync); + init_completion(&ar->bss_survey_done); + init_completion(&ar->peer_delete_done); + init_completion(&ar->peer_stats_info_complete); + + INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work); + + ar->workqueue = create_singlethread_workqueue("ath10k_wq"); + if (!ar->workqueue) + goto err_free_mac; + + ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq"); + if (!ar->workqueue_aux) + goto err_free_wq; + + ar->workqueue_tx_complete = + create_singlethread_workqueue("ath10k_tx_complete_wq"); + if (!ar->workqueue_tx_complete) + goto err_free_aux_wq; + + mutex_init(&ar->conf_mutex); + mutex_init(&ar->dump_mutex); + spin_lock_init(&ar->data_lock); + + for (int ac = 0; ac < IEEE80211_NUM_ACS; ac++) + spin_lock_init(&ar->queue_lock[ac]); + + INIT_LIST_HEAD(&ar->peers); + init_waitqueue_head(&ar->peer_mapping_wq); + init_waitqueue_head(&ar->htt.empty_tx_wq); + init_waitqueue_head(&ar->wmi.tx_credits_wq); + + skb_queue_head_init(&ar->htt.rx_indication_head); + + init_completion(&ar->offchan_tx_completed); + INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); + skb_queue_head_init(&ar->offchan_tx_queue); + + INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work); + skb_queue_head_init(&ar->wmi_mgmt_tx_queue); + + INIT_WORK(&ar->register_work, ath10k_core_register_work); + INIT_WORK(&ar->restart_work, ath10k_core_restart); + INIT_WORK(&ar->recovery_check_work, ath10k_core_recovery_check_work); + INIT_WORK(&ar->set_coverage_class_work, + ath10k_core_set_coverage_class_work); + + ar->napi_dev = alloc_netdev_dummy(0); + if (!ar->napi_dev) + goto err_free_tx_complete; + + ret = ath10k_coredump_create(ar); if (ret) - ath10k_warn("could not suspend target (%d)\n", ret); + goto err_free_netdev; - return ret; + ret = ath10k_debug_create(ar); + if (ret) + goto err_free_coredump; + + return ar; + +err_free_coredump: + ath10k_coredump_destroy(ar); +err_free_netdev: + free_netdev(ar->napi_dev); +err_free_tx_complete: + destroy_workqueue(ar->workqueue_tx_complete); +err_free_aux_wq: + destroy_workqueue(ar->workqueue_aux); +err_free_wq: + destroy_workqueue(ar->workqueue); +err_free_mac: + ath10k_mac_destroy(ar); + + return NULL; } -EXPORT_SYMBOL(ath10k_core_target_suspend); +EXPORT_SYMBOL(ath10k_core_create); -int ath10k_core_target_resume(struct ath10k *ar) +void ath10k_core_destroy(struct ath10k *ar) { - int ret; + destroy_workqueue(ar->workqueue); - ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__); + destroy_workqueue(ar->workqueue_aux); - ret = ath10k_wmi_pdev_resume_target(ar); - if (ret) - ath10k_warn("could not resume target (%d)\n", ret); + destroy_workqueue(ar->workqueue_tx_complete); - return ret; + free_netdev(ar->napi_dev); + ath10k_debug_destroy(ar); + ath10k_coredump_destroy(ar); + ath10k_htt_tx_destroy(&ar->htt); + ath10k_wmi_free_host_mem(ar); + ath10k_mac_destroy(ar); } -EXPORT_SYMBOL(ath10k_core_target_resume); +EXPORT_SYMBOL(ath10k_core_destroy); MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); +MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards."); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 539336d1be4b..73a9db302245 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. 
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #ifndef _CORE_H_ @@ -22,52 +13,130 @@ #include <linux/if_ether.h> #include <linux/types.h> #include <linux/pci.h> +#include <linux/uuid.h> +#include <linux/time.h> +#include <linux/leds.h> +#include "htt.h" #include "htc.h" #include "hw.h" #include "targaddrs.h" #include "wmi.h" #include "../ath.h" #include "../regd.h" +#include "../dfs_pattern_detector.h" +#include "spectral.h" +#include "thermal.h" +#include "wow.h" +#include "swap.h" #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) #define WO(_f) ((_f##_OFFSET) >> 2) #define ATH10K_SCAN_ID 0 +#define ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* msec */ #define WMI_READY_TIMEOUT (5 * HZ) -#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ) +#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ) +#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ) +#define ATH10K_NUM_CHANS 41 +#define ATH10K_MAX_5G_CHAN 173 /* Antenna noise floor */ #define ATH10K_DEFAULT_NOISE_FLOOR -95 +#define ATH10K_INVALID_RSSI 128 + +#define ATH10K_MAX_NUM_MGMT_PENDING 128 + +/* number of failed packets (20 packets with 16 sw retries each) */ +#define ATH10K_KICKOUT_THRESHOLD (20 * 16) + +/* + * Use insanely high numbers to make sure that the firmware implementation + * won't start; we already have the same functionality in hostapd. Unit + * is seconds. + */ +#define ATH10K_KEEPALIVE_MIN_IDLE 3747 +#define ATH10K_KEEPALIVE_MAX_IDLE 3895 +#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900 + +/* SMBIOS type containing Board Data File Name Extension */ +#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8 + +/* SMBIOS type structure length (excluding strings-set) */ +#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9 + +/* Offset pointing to Board Data File Name Extension */ +#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8 + +/* Board Data File Name Extension string length.
+ * String format: BDF_<Customer ID>_<Extension>\0 + */ +#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20 + +/* The magic used by QCA spec */ +#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_" + +/* Default Airtime weight multiplier (Tuned for multiclient performance) */ +#define ATH10K_AIRTIME_WEIGHT_MULTIPLIER 4 + +#define ATH10K_MAX_RETRY_COUNT 30 + +#define ATH10K_ITER_NORMAL_FLAGS (IEEE80211_IFACE_ITER_NORMAL | \ + IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER) +#define ATH10K_ITER_RESUME_FLAGS (IEEE80211_IFACE_ITER_RESUME_ALL |\ + IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER) +#define ATH10K_RECOVERY_TIMEOUT_HZ (5 * HZ) +#define ATH10K_RECOVERY_MAX_FAIL_COUNT 4 + struct ath10k; -enum ath10k_bus { - ATH10K_BUS_PCI, +static inline const char *ath10k_bus_str(enum ath10k_bus bus) +{ + switch (bus) { + case ATH10K_BUS_PCI: + return "pci"; + case ATH10K_BUS_AHB: + return "ahb"; + case ATH10K_BUS_SDIO: + return "sdio"; + case ATH10K_BUS_USB: + return "usb"; + case ATH10K_BUS_SNOC: + return "snoc"; + } + + return "unknown"; +} + +enum ath10k_skb_flags { + ATH10K_SKB_F_NO_HWCRYPT = BIT(0), + ATH10K_SKB_F_DTIM_ZERO = BIT(1), + ATH10K_SKB_F_DELIVER_CAB = BIT(2), + ATH10K_SKB_F_MGMT = BIT(3), + ATH10K_SKB_F_QOS = BIT(4), + ATH10K_SKB_F_RAW_TX = BIT(5), + ATH10K_SKB_F_NOACK_TID = BIT(6), }; struct ath10k_skb_cb { dma_addr_t paddr; - bool is_mapped; - bool is_aborted; - - struct { - u8 vdev_id; - u16 msdu_id; - u8 tid; - bool is_offchan; - bool is_conf; - bool discard; - bool no_ack; - u8 refcount; - struct sk_buff *txfrag; - struct sk_buff *msdu; - } __packed htt; - - /* 4 bytes left on 64bit arch */ + u8 flags; + u8 eid; + u16 msdu_id; + u16 airtime_est; + struct ieee80211_vif *vif; + struct ieee80211_txq *txq; + u32 ucast_cipher; } __packed; +struct ath10k_skb_rxcb { + dma_addr_t paddr; + struct hlist_node hlist; + u8 eid; +}; + static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) { BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) > @@ -75,67 +144,150 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; } -static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb) +static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb) { - if (ATH10K_SKB_CB(skb)->is_mapped) - return -EINVAL; - - ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len, - DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr))) - return -EIO; - - ATH10K_SKB_CB(skb)->is_mapped = true; - return 0; + BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb)); + return (struct ath10k_skb_rxcb *)skb->cb; } -static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb) -{ - if (!ATH10K_SKB_CB(skb)->is_mapped) - return -EINVAL; - - dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len, - DMA_TO_DEVICE); - ATH10K_SKB_CB(skb)->is_mapped = false; - return 0; -} +#define ATH10K_RXCB_SKB(rxcb) \ + container_of((void *)rxcb, struct sk_buff, cb) static inline u32 host_interest_item_address(u32 item_offset) { return QCA988X_HOST_INTEREST_ADDRESS + item_offset; } +enum ath10k_phy_mode { + ATH10K_PHY_MODE_LEGACY = 0, + ATH10K_PHY_MODE_HT = 1, + ATH10K_PHY_MODE_VHT = 2, +}; + +/* Data rate 100KBPS based on IE Index */ +struct ath10k_index_ht_data_rate_type { + u8 beacon_rate_index; + u16 supported_rate[4]; +}; + +/* Data rate 100KBPS based on IE Index */ +struct ath10k_index_vht_data_rate_type { + u8 beacon_rate_index; + u16 supported_VHT80_rate[2]; + u16 
supported_VHT40_rate[2]; + u16 supported_VHT20_rate[2]; +}; + struct ath10k_bmi { bool done_sent; }; +struct ath10k_mem_chunk { + void *vaddr; + dma_addr_t paddr; + u32 len; + u32 req_id; +}; + struct ath10k_wmi { enum ath10k_htc_ep_id eid; struct completion service_ready; struct completion unified_ready; - atomic_t pending_tx_count; - wait_queue_head_t wq; - - struct sk_buff_head wmi_event_list; - struct work_struct wmi_event_work; + struct completion barrier; + struct completion radar_confirm; + wait_queue_head_t tx_credits_wq; + DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX); + struct wmi_cmd_map *cmd; + struct wmi_vdev_param_map *vdev_param; + struct wmi_pdev_param_map *pdev_param; + struct wmi_peer_param_map *peer_param; + const struct wmi_ops *ops; + const struct wmi_peer_flags_map *peer_flags; + + u32 mgmt_max_num_pending_tx; + + /* Protected by data_lock */ + struct idr mgmt_pending_tx; + + u32 num_mem_chunks; + u32 rx_decap_mode; + struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS]; }; -struct ath10k_peer_stat { +struct ath10k_fw_stats_peer { + struct list_head list; + u8 peer_macaddr[ETH_ALEN]; u32 peer_rssi; u32 peer_tx_rate; + u32 peer_rx_rate; /* 10x only */ + u64 rx_duration; }; -struct ath10k_target_stats { +struct ath10k_fw_extd_stats_peer { + struct list_head list; + + u8 peer_macaddr[ETH_ALEN]; + u64 rx_duration; +}; + +struct ath10k_fw_stats_vdev { + struct list_head list; + + u32 vdev_id; + u32 beacon_snr; + u32 data_snr; + u32 num_tx_frames[4]; + u32 num_rx_frames; + u32 num_tx_frames_retries[4]; + u32 num_tx_frames_failures[4]; + u32 num_rts_fail; + u32 num_rts_success; + u32 num_rx_err; + u32 num_rx_discard; + u32 num_tx_not_acked; + u32 tx_rate_history[10]; + u32 beacon_rssi_history[10]; +}; + +struct ath10k_fw_stats_vdev_extd { + struct list_head list; + + u32 vdev_id; + u32 ppdu_aggr_cnt; + u32 ppdu_noack; + u32 mpdu_queued; + u32 ppdu_nonaggr_cnt; + u32 mpdu_sw_requeued; + u32 mpdu_suc_retry; + u32 mpdu_suc_multitry; + u32 mpdu_fail_retry; + u32 tx_ftm_suc; + u32 tx_ftm_suc_retry; + u32 tx_ftm_fail; + u32 rx_ftmr_cnt; + u32 rx_ftmr_dup_cnt; + u32 rx_iftmr_cnt; + u32 rx_iftmr_dup_cnt; +}; + +struct ath10k_fw_stats_pdev { + struct list_head list; + /* PDEV stats */ s32 ch_noise_floor; - u32 tx_frame_count; - u32 rx_frame_count; - u32 rx_clear_count; - u32 cycle_count; + u32 tx_frame_count; /* Cycles spent transmitting frames */ + u32 rx_frame_count; /* Cycles spent receiving frames */ + u32 rx_clear_count; /* Total channel busy time, evidently */ + u32 cycle_count; /* Total on-channel time */ u32 phy_err_count; u32 chan_tx_power; + u32 ack_rx_bad; + u32 rts_bad; + u32 rts_good; + u32 fcs_bad; + u32 no_beacons; + u32 mib_int_count; /* PDEV TX stats */ s32 comp_queued; @@ -148,8 +300,9 @@ struct ath10k_target_stats { s32 hw_queued; s32 hw_reaped; s32 underrun; + u32 hw_paused; s32 tx_abort; - s32 mpdus_requed; + s32 mpdus_requeued; u32 tx_ko; u32 data_rc; u32 self_triggers; @@ -160,6 +313,16 @@ struct ath10k_target_stats { u32 pdev_resets; u32 phy_underrun; u32 txop_ovf; + u32 seq_posted; + u32 seq_failed_queueing; + u32 seq_completed; + u32 seq_restarted; + u32 mu_seq_posted; + u32 mpdus_sw_flush; + u32 mpdus_hw_filter; + u32 mpdus_truncated; + u32 mpdus_ack_failed; + u32 mpdus_expired; /* PDEV RX stats */ s32 mid_ppdu_route_change; @@ -176,64 +339,307 @@ struct ath10k_target_stats { s32 phy_errs; s32 phy_err_drop; s32 mpdu_errs; + s32 rx_ovfl_errs; +}; + +struct ath10k_fw_stats { + bool extended; + struct list_head pdevs; + struct list_head vdevs; + struct list_head 
peers; + struct list_head peers_extd; +}; + +#define ATH10K_TPC_TABLE_TYPE_FLAG 1 +#define ATH10K_TPC_PREAM_TABLE_END 0xFFFF - /* VDEV STATS */ +struct ath10k_tpc_table { + u32 pream_idx[WMI_TPC_RATE_MAX]; + u8 rate_code[WMI_TPC_RATE_MAX]; + char tpc_value[WMI_TPC_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE]; +}; - /* PEER STATS */ - u8 peers; - struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS]; +struct ath10k_tpc_stats { + u32 reg_domain; + u32 chan_freq; + u32 phy_mode; + u32 twice_antenna_reduction; + u32 twice_max_rd_power; + s32 twice_antenna_gain; + u32 power_limit; + u32 num_tx_chain; + u32 ctl; + u32 rate_max; + u8 flag[WMI_TPC_FLAG]; + struct ath10k_tpc_table tpc_table[WMI_TPC_FLAG]; +}; - /* TODO: Beacon filter stats */ +struct ath10k_tpc_table_final { + u32 pream_idx[WMI_TPC_FINAL_RATE_MAX]; + u8 rate_code[WMI_TPC_FINAL_RATE_MAX]; + char tpc_value[WMI_TPC_FINAL_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE]; +}; + +struct ath10k_tpc_stats_final { + u32 reg_domain; + u32 chan_freq; + u32 phy_mode; + u32 twice_antenna_reduction; + u32 twice_max_rd_power; + s32 twice_antenna_gain; + u32 power_limit; + u32 num_tx_chain; + u32 ctl; + u32 rate_max; + u8 flag[WMI_TPC_FLAG]; + struct ath10k_tpc_table_final tpc_table_final[WMI_TPC_FLAG]; +}; +struct ath10k_dfs_stats { + u32 phy_errors; + u32 pulses_total; + u32 pulses_detected; + u32 pulses_discarded; + u32 radar_detected; +}; + +enum ath10k_radar_confirmation_state { + ATH10K_RADAR_CONFIRMATION_IDLE = 0, + ATH10K_RADAR_CONFIRMATION_INPROGRESS, + ATH10K_RADAR_CONFIRMATION_STOPPED, +}; + +struct ath10k_radar_found_info { + u32 pri_min; + u32 pri_max; + u32 width_min; + u32 width_max; + u32 sidx_min; + u32 sidx_max; }; #define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */ struct ath10k_peer { struct list_head list; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + + bool removed; int vdev_id; u8 addr[ETH_ALEN]; DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS); + + /* protected by ar->data_lock */ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; + union htt_rx_pn_t tids_last_pn[ATH10K_TXRX_NUM_EXT_TIDS]; + bool tids_last_pn_valid[ATH10K_TXRX_NUM_EXT_TIDS]; + union htt_rx_pn_t frag_tids_last_pn[ATH10K_TXRX_NUM_EXT_TIDS]; + u32 frag_tids_seq[ATH10K_TXRX_NUM_EXT_TIDS]; + struct { + enum htt_security_types sec_type; + int pn_len; + } rx_pn[ATH10K_HTT_TXRX_PEER_SECURITY_MAX]; +}; + +struct ath10k_txq { + struct list_head list; + unsigned long num_fw_queued; + unsigned long num_push_allowed; +}; + +enum ath10k_pkt_rx_err { + ATH10K_PKT_RX_ERR_FCS, + ATH10K_PKT_RX_ERR_TKIP, + ATH10K_PKT_RX_ERR_CRYPT, + ATH10K_PKT_RX_ERR_PEER_IDX_INVAL, + ATH10K_PKT_RX_ERR_MAX, }; -#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) +enum ath10k_ampdu_subfrm_num { + ATH10K_AMPDU_SUBFRM_NUM_10, + ATH10K_AMPDU_SUBFRM_NUM_20, + ATH10K_AMPDU_SUBFRM_NUM_30, + ATH10K_AMPDU_SUBFRM_NUM_40, + ATH10K_AMPDU_SUBFRM_NUM_50, + ATH10K_AMPDU_SUBFRM_NUM_60, + ATH10K_AMPDU_SUBFRM_NUM_MORE, + ATH10K_AMPDU_SUBFRM_NUM_MAX, +}; + +enum ath10k_amsdu_subfrm_num { + ATH10K_AMSDU_SUBFRM_NUM_1, + ATH10K_AMSDU_SUBFRM_NUM_2, + ATH10K_AMSDU_SUBFRM_NUM_3, + ATH10K_AMSDU_SUBFRM_NUM_4, + ATH10K_AMSDU_SUBFRM_NUM_MORE, + ATH10K_AMSDU_SUBFRM_NUM_MAX, +}; + +struct ath10k_sta_tid_stats { + unsigned long rx_pkt_from_fw; + unsigned long rx_pkt_unchained; + unsigned long rx_pkt_drop_chained; + unsigned long rx_pkt_drop_filter; + unsigned long rx_pkt_err[ATH10K_PKT_RX_ERR_MAX]; + unsigned long rx_pkt_queued_for_mac; + unsigned long rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MAX]; + 
unsigned long rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX]; +}; + +enum ath10k_counter_type { + ATH10K_COUNTER_TYPE_BYTES, + ATH10K_COUNTER_TYPE_PKTS, + ATH10K_COUNTER_TYPE_MAX, +}; + +enum ath10k_stats_type { + ATH10K_STATS_TYPE_SUCC, + ATH10K_STATS_TYPE_FAIL, + ATH10K_STATS_TYPE_RETRY, + ATH10K_STATS_TYPE_AMPDU, + ATH10K_STATS_TYPE_MAX, +}; + +struct ath10k_htt_data_stats { + u64 legacy[ATH10K_COUNTER_TYPE_MAX][ATH10K_LEGACY_NUM]; + u64 ht[ATH10K_COUNTER_TYPE_MAX][ATH10K_HT_MCS_NUM]; + u64 vht[ATH10K_COUNTER_TYPE_MAX][ATH10K_VHT_MCS_NUM]; + u64 bw[ATH10K_COUNTER_TYPE_MAX][ATH10K_BW_NUM]; + u64 nss[ATH10K_COUNTER_TYPE_MAX][ATH10K_NSS_NUM]; + u64 gi[ATH10K_COUNTER_TYPE_MAX][ATH10K_GI_NUM]; + u64 rate_table[ATH10K_COUNTER_TYPE_MAX][ATH10K_RATE_TABLE_NUM]; +}; + +struct ath10k_htt_tx_stats { + struct ath10k_htt_data_stats stats[ATH10K_STATS_TYPE_MAX]; + u64 tx_duration; + u64 ba_fails; + u64 ack_fails; +}; + +#define ATH10K_TID_MAX 8 + +struct ath10k_sta { + struct ath10k_vif *arvif; + + /* the following are protected by ar->data_lock */ + u32 changed; /* IEEE80211_RC_* */ + u32 bw; + u32 nss; + u32 smps; + u16 peer_id; + struct rate_info txrate; + struct ieee80211_tx_info tx_info; + u32 tx_retries; + u32 tx_failed; + u32 last_tx_bitrate; + + u32 rx_rate_code; + u32 rx_bitrate_kbps; + u32 tx_rate_code; + u32 tx_bitrate_kbps; + struct work_struct update_wk; + u64 rx_duration; + struct ath10k_htt_tx_stats *tx_stats; + u32 ucast_cipher; + +#ifdef CONFIG_MAC80211_DEBUGFS + /* protected by conf_mutex */ + bool aggr_mode; + + /* Protected with ar->data_lock */ + struct ath10k_sta_tid_stats tid_stats[IEEE80211_NUM_TIDS + 1]; +#endif + /* Protected with ar->data_lock */ + u32 peer_ps_state; + struct work_struct tid_config_wk; + int noack[ATH10K_TID_MAX]; + int retry_long[ATH10K_TID_MAX]; + int ampdu[ATH10K_TID_MAX]; + u8 rate_ctrl[ATH10K_TID_MAX]; + u32 rate_code[ATH10K_TID_MAX]; + int rtscts[ATH10K_TID_MAX]; +}; + +#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ) +#define ATH10K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ) + +enum ath10k_beacon_state { + ATH10K_BEACON_SCHEDULED = 0, + ATH10K_BEACON_SENDING, + ATH10K_BEACON_SENT, +}; struct ath10k_vif { + struct list_head list; + u32 vdev_id; + u16 peer_id; enum wmi_vdev_type vdev_type; enum wmi_vdev_subtype vdev_subtype; u32 beacon_interval; u32 dtim_period; + struct sk_buff *beacon; + /* protected by data_lock */ + enum ath10k_beacon_state beacon_state; + void *beacon_buf; + dma_addr_t beacon_paddr; + unsigned long tx_paused; /* arbitrary values defined by target */ struct ath10k *ar; struct ieee80211_vif *vif; + bool is_started; + bool is_up; + bool spectral_enabled; + bool ps; + u32 aid; + u8 bssid[ETH_ALEN]; + struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; - u8 def_wep_key_index; + s8 def_wep_key_idx; u16 tx_seq_no; union { struct { - u8 bssid[ETH_ALEN]; u32 uapsd; } sta; struct { - /* 127 stations; wmi limit */ - u8 tim_bitmap[16]; + /* 512 stations */ + u8 tim_bitmap[64]; u8 tim_len; u32 ssid_len; - u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid[IEEE80211_MAX_SSID_LEN] __nonstring; bool hidden_ssid; /* P2P_IE with NoA attribute for P2P_GO case */ u32 noa_len; u8 *noa_data; } ap; - struct { - u8 bssid[ETH_ALEN]; - } ibss; } u; + + bool use_cts_prot; + bool nohwcrypt; + int num_legacy_stations; + int txpower; + bool ftm_responder; + struct wmi_wmm_params_all_arg wmm_params; + struct work_struct ap_csa_work; + struct delayed_work connection_loss_work; + struct cfg80211_bitrate_mask bitrate_mask; + + /* For setting VHT peer fixed rate, protected by 
conf_mutex */ + int vht_num_rates; + u8 vht_pfr; + u32 tid_conf_changed[ATH10K_TID_MAX]; + int noack[ATH10K_TID_MAX]; + int retry_long[ATH10K_TID_MAX]; + int ampdu[ATH10K_TID_MAX]; + u8 rate_ctrl[ATH10K_TID_MAX]; + u32 rate_code[ATH10K_TID_MAX]; + int rtscts[ATH10K_TID_MAX]; + u32 tids_rst; }; struct ath10k_vif_iter { @@ -241,80 +647,481 @@ struct ath10k_vif_iter { struct ath10k_vif *arvif; }; +/* Copy Engine register dump, protected by ce-lock */ +struct ath10k_ce_crash_data { + __le32 base_addr; + __le32 src_wr_idx; + __le32 src_r_idx; + __le32 dst_wr_idx; + __le32 dst_r_idx; +}; + +struct ath10k_ce_crash_hdr { + __le32 ce_count; + __le32 reserved[3]; /* for future use */ + struct ath10k_ce_crash_data entries[]; +}; + +#define MAX_MEM_DUMP_TYPE 5 + +/* used for crash-dump storage, protected by data-lock */ +struct ath10k_fw_crash_data { + guid_t guid; + struct timespec64 timestamp; + __le32 registers[REG_DUMP_COUNT_QCA988X]; + struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX]; + + u8 *ramdump_buf; + size_t ramdump_buf_len; +}; + struct ath10k_debug { struct dentry *debugfs_phy; - struct ath10k_target_stats target_stats; - u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; + struct ath10k_fw_stats fw_stats; + struct completion fw_stats_complete; + bool fw_stats_done; + + unsigned long htt_stats_mask; + unsigned long reset_htt_stats; + struct delayed_work htt_stats_dwork; + struct ath10k_dfs_stats dfs_stats; + struct ath_dfs_pool_stats dfs_pool_stats; + + /* used for tpc-dump storage, protected by data-lock */ + struct ath10k_tpc_stats *tpc_stats; + struct ath10k_tpc_stats_final *tpc_stats_final; + + struct completion tpc_complete; + + /* protected by conf_mutex */ + u64 fw_dbglog_mask; + u32 fw_dbglog_level; + u32 reg_addr; + u32 nf_cal_period; + void *cal_data; + u32 enable_extd_tx_stats; + u8 fw_dbglog_mode; +}; + +enum ath10k_state { + ATH10K_STATE_OFF = 0, + ATH10K_STATE_ON, + + /* When doing firmware recovery the device is first powered down. + * mac80211 is supposed to call into the start() hook later on. It is + * however possible that driver unloading and firmware crash overlap. + * mac80211 can wait on conf_mutex in stop() while the device is + * stopped in ath10k_core_restart() work holding conf_mutex. The state + * RESTARTED means that the device is up and mac80211 has started hw + * reconfiguration. Once mac80211 is done with the reconfiguration we + * set the state to STATE_ON in reconfig_complete(). + */ + ATH10K_STATE_RESTARTING, + ATH10K_STATE_RESTARTED, + + /* The device has crashed while restarting hw. This state is like ON + * but commands are blocked in HTC and an -ECOMM response is given. + * This prevents completion timeouts and makes the driver more + * responsive to userspace commands. This also prevents recursive + * recovery. + */ + ATH10K_STATE_WEDGED, + + /* factory tests */ + ATH10K_STATE_UTF, +}; + +enum ath10k_firmware_mode { + /* the default mode, standard 802.11 functionality */ + ATH10K_FIRMWARE_MODE_NORMAL, + + /* factory tests etc */ + ATH10K_FIRMWARE_MODE_UTF, +}; + +enum ath10k_fw_features { + /* wmi_mgmt_rx_hdr contains extra RSSI information */ + ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0, + + /* Firmware from 10X branch. Deprecated, don't use in new code. */ + ATH10K_FW_FEATURE_WMI_10X = 1, + + /* firmware supports tx frame management over WMI, otherwise it's HTT */ + ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2, + + /* Firmware does not support P2P */ + ATH10K_FW_FEATURE_NO_P2P = 3, + + /* Firmware 10.2 feature bit.
The ATH10K_FW_FEATURE_WMI_10X feature + * bit is required to be set as well. Deprecated, don't use in new + * code. + */ + ATH10K_FW_FEATURE_WMI_10_2 = 4, + + /* Some firmware revisions lack a proper multi-interface client + * powersave implementation. Enabling PS could result in connection + * drops, traffic stalls, etc. + */ + ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5, + + /* Some firmware revisions have an incomplete WoWLAN implementation + * despite the WMI service bit being advertised. This feature flag is + * used to distinguish whether WoWLAN is really supported or not. + */ + ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6, + + /* Don't trust error code from otp.bin */ + ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7, + + /* Some firmware revisions pad 4th hw address to 4 byte boundary making + * it 8 bytes long in Native Wifi Rx decap. + */ + ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8, + + /* Firmware supports bypassing PLL setting on init. */ + ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9, + + /* Raw mode support. If supported, FW supports receiving and transmitting + * frames in raw mode. + */ + ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10, + + /* Firmware supports Adaptive CCA */ + ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA = 11, + + /* Firmware supports management frame protection */ + ATH10K_FW_FEATURE_MFP_SUPPORT = 12, + + /* Firmware supports pull-push model where the host shares its software + * queue state with firmware and firmware generates fetch requests + * telling the host which queues to dequeue tx from. + * + * The primary function of this is improved MU-MIMO performance with + * multiple clients. + */ + ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13, + + /* Firmware supports BT-Coex without reloading firmware via pdev param. + * To support Bluetooth coexistence pdev param, WMI_COEX_GPIO_SUPPORT of + * extended resource config should be enabled always. This firmware IE + * is used to configure WMI_COEX_GPIO_SUPPORT. + */ + ATH10K_FW_FEATURE_BTCOEX_PARAM = 14, + + /* Unused flag, proven not to work; enable this if you want to + * experiment with sending NULL func data frames in HTT TX + */ + ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15, + + /* Firmware allows other BSS mesh broadcast/multicast frames without + * creating a monitor interface. Appropriate rxfilters are programmed + * for the mesh vdev by firmware itself. This feature flag is used to + * avoid creating a monitor vdev while configuring a mesh node. + */ + ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST = 16, + + /* Firmware does not support power save in station mode. */ + ATH10K_FW_FEATURE_NO_PS = 17, + + /* Firmware allows management tx by reference instead of by value. */ + ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18, + + /* Firmware load is done externally, not by bmi */ + ATH10K_FW_FEATURE_NON_BMI = 19, + + /* Firmware sends only one chan_info event per channel */ + ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL = 20, + + /* Firmware allows setting peer fixed rate */ + ATH10K_FW_FEATURE_PEER_FIXED_RATE = 21, + + /* Firmware supports IRAM recovery */ + ATH10K_FW_FEATURE_IRAM_RECOVERY = 22, + + /* keep last */ + ATH10K_FW_FEATURE_COUNT, +}; + +enum ath10k_dev_flags { + /* Indicates that the ath10k device is in the CAC phase of DFS */ + ATH10K_CAC_RUNNING, + ATH10K_FLAG_CORE_REGISTERED, + + /* Device has crashed and needs to restart. This indicates any pending + * waiters should immediately cancel instead of waiting for a timeout. + */ + ATH10K_FLAG_CRASH_FLUSH, + + /* Use Raw mode instead of native WiFi Tx/Rx encap mode.
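+ * (Editorial sketch: the flag is typically decided once at probe time + * from a module parameter plus the firmware feature bit; names below + * follow this driver's conventions but the exact gating is assumed, + * + * if (rawmode) { + * if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, + * fw_file->fw_features)) + * return -EINVAL; + * set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); + * } + * + * where rawmode stands for the driver's raw-datapath modparam.)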
+ * Raw mode supports both hardware and software crypto. Native WiFi only + * supports hardware crypto. + */ + ATH10K_FLAG_RAW_MODE, + + /* Disable HW crypto engine */ + ATH10K_FLAG_HW_CRYPTO_DISABLED, + + /* Bluetooth coexistence enabled */ + ATH10K_FLAG_BTCOEX, + + /* Per Station statistics service */ + ATH10K_FLAG_PEER_STATS, + + /* protected by conf_mutex */ + ATH10K_FLAG_NAPI_ENABLED, +}; + +enum ath10k_cal_mode { + ATH10K_CAL_MODE_FILE, + ATH10K_CAL_MODE_OTP, + ATH10K_CAL_MODE_DT, + ATH10K_CAL_MODE_NVMEM, + ATH10K_PRE_CAL_MODE_FILE, + ATH10K_PRE_CAL_MODE_DT, + ATH10K_PRE_CAL_MODE_NVMEM, + ATH10K_CAL_MODE_EEPROM, +}; + +enum ath10k_crypt_mode { + /* Only use hardware crypto engine */ + ATH10K_CRYPT_MODE_HW, + /* Only use software crypto engine */ + ATH10K_CRYPT_MODE_SW, +}; + +static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) +{ + switch (mode) { + case ATH10K_CAL_MODE_FILE: + return "file"; + case ATH10K_CAL_MODE_OTP: + return "otp"; + case ATH10K_CAL_MODE_DT: + return "dt"; + case ATH10K_CAL_MODE_NVMEM: + return "nvmem"; + case ATH10K_PRE_CAL_MODE_FILE: + return "pre-cal-file"; + case ATH10K_PRE_CAL_MODE_DT: + return "pre-cal-dt"; + case ATH10K_PRE_CAL_MODE_NVMEM: + return "pre-cal-nvmem"; + case ATH10K_CAL_MODE_EEPROM: + return "eeprom"; + } + + return "unknown"; +} + +enum ath10k_scan_state { + ATH10K_SCAN_IDLE, + ATH10K_SCAN_STARTING, + ATH10K_SCAN_RUNNING, + ATH10K_SCAN_ABORTING, +}; + +static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state) +{ + switch (state) { + case ATH10K_SCAN_IDLE: + return "idle"; + case ATH10K_SCAN_STARTING: + return "starting"; + case ATH10K_SCAN_RUNNING: + return "running"; + case ATH10K_SCAN_ABORTING: + return "aborting"; + } + + return "unknown"; +} + +enum ath10k_tx_pause_reason { + ATH10K_TX_PAUSE_Q_FULL, + ATH10K_TX_PAUSE_MAX, +}; + +struct ath10k_fw_file { + const struct firmware *firmware; + + char fw_version[ETHTOOL_FWVERS_LEN]; + + DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); + + enum ath10k_fw_wmi_op_version wmi_op_version; + enum ath10k_fw_htt_op_version htt_op_version; + + const void *firmware_data; + size_t firmware_len; + + const void *otp_data; + size_t otp_len; + + const void *codeswap_data; + size_t codeswap_len; + + /* The original idea of struct ath10k_fw_file was that it only + * contains struct firmware and pointers to various parts (actual + * firmware binary, otp, metadata etc) of the file. This seg_info + * is actually created separately, but as it is used similarly to + * the other firmware components it's more convenient to have it + * here.
+ */ + struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info; +}; + +struct ath10k_fw_components { + const struct firmware *board; + const void *board_data; + size_t board_len; + const struct firmware *ext_board; + const void *ext_board_data; + size_t ext_board_len; + + struct ath10k_fw_file fw_file; +}; + +struct ath10k_per_peer_tx_stats { + u32 succ_bytes; + u32 retry_bytes; + u32 failed_bytes; + u8 ratecode; + u8 flags; + u16 peer_id; + u16 succ_pkts; + u16 retry_pkts; + u16 failed_pkts; + u16 duration; + u32 reserved1; + u32 reserved2; +}; + +enum ath10k_dev_type { + ATH10K_DEV_TYPE_LL, + ATH10K_DEV_TYPE_HL, +}; - struct completion event_stats_compl; +struct ath10k_bus_params { + u32 chip_id; + enum ath10k_dev_type dev_type; + bool link_can_suspend; + bool hl_msdu_ids; }; struct ath10k { struct ath_common ath_common; struct ieee80211_hw *hw; + struct ieee80211_ops *ops; struct device *dev; + struct msa_region { + dma_addr_t paddr; + u32 mem_size; + void *vaddr; + } msa; u8 mac_addr[ETH_ALEN]; + enum ath10k_hw_rev hw_rev; + u16 dev_id; + u32 chip_id; u32 target_version; u8 fw_version_major; u32 fw_version_minor; u16 fw_version_release; u16 fw_version_build; + u32 fw_stats_req_mask; u32 phy_capability; u32 hw_min_tx_power; u32 hw_max_tx_power; + u32 hw_eeprom_rd; u32 ht_cap_info; u32 vht_cap_info; - - struct targetdef *targetdef; - struct hostdef *hostdef; - + u32 vht_supp_mcs; + u32 num_rf_chains; + u32 max_spatial_stream; + /* protected by conf_mutex */ + u32 low_2ghz_chan; + u32 high_2ghz_chan; + u32 low_5ghz_chan; + u32 high_5ghz_chan; + bool ani_enabled; + u32 sys_cap_info; + + /* protected by data_lock */ + bool hw_rfkill_on; + + /* protected by conf_mutex */ + u8 ps_state_enable; + + bool nlo_enabled; bool p2p; struct { - void *priv; enum ath10k_bus bus; const struct ath10k_hif_ops *ops; } hif; + struct completion target_suspend; + struct completion driver_recovery; + + const struct ath10k_hw_regs *regs; + const struct ath10k_hw_ce_regs *hw_ce_regs; + const struct ath10k_hw_values *hw_values; + struct ath10k_bmi bmi; struct ath10k_wmi wmi; + struct ath10k_htc htc; + struct ath10k_htt htt; - wait_queue_head_t event_queue; - bool is_target_paused; + struct ath10k_hw_params hw_params; - struct ath10k_bmi bmi; + /* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */ + struct ath10k_fw_components normal_mode_fw; + + /* READ-ONLY images of the running firmware, which can be either + * normal or UTF. Do not modify, release etc! 
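+ * + * (Editorial usage sketch, respecting the read-only rule stated above: + * + * const struct ath10k_fw_file *fw_file = &ar->running_fw->fw_file; + * + * if (test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features)) + * ...; + * + * i.e. inspect the feature bitmap and pointers here, never modify or + * release them.)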
+ */ + const struct ath10k_fw_components *running_fw; - struct ath10k_htc *htc; - struct ath10k_htt *htt; + const char *board_name; - struct ath10k_hw_params { - u32 id; - const char *name; - u32 patch_load_addr; + const struct firmware *pre_cal_file; + const struct firmware *cal_file; - struct ath10k_hw_params_fw { - const char *dir; - const char *fw; - const char *otp; - const char *board; - } fw; - } hw_params; + struct { + u32 vendor; + u32 device; + u32 subsystem_vendor; + u32 subsystem_device; + + bool bmi_ids_valid; + bool qmi_ids_valid; + u32 qmi_board_id; + u32 qmi_chip_id; + u8 bmi_board_id; + u8 bmi_eboard_id; + u8 bmi_chip_id; + bool ext_bid_supported; + + char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; + } id; + + int fw_api; + int bd_api; + enum ath10k_cal_mode cal_mode; struct { struct completion started; struct completion completed; struct completion on_channel; - struct timer_list timeout; + struct delayed_work timeout; + enum ath10k_scan_state state; bool is_roc; - bool in_progress; - bool aborting; int vdev_id; int roc_freq; + bool roc_notify; } scan; struct { - struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band sbands[NUM_NL80211_BANDS]; } mac; /* should never be NULL; needed for regular htt rx */ @@ -323,47 +1130,239 @@ struct ath10k { /* valid during scan; needed for mgmt rx during scan */ struct ieee80211_channel *scan_channel; - int free_vdev_map; + /* current operating channel definition */ + struct cfg80211_chan_def chandef; + + /* currently configured operating channel in firmware */ + struct ieee80211_channel *tgt_oper_chan; + + unsigned long long free_vdev_map; + struct ath10k_vif *monitor_arvif; + bool monitor; int monitor_vdev_id; - bool monitor_enabled; - bool monitor_present; + bool monitor_started; unsigned int filter_flags; + unsigned long dev_flags; + bool dfs_block_radar_events; + + /* protected by conf_mutex */ + bool radar_enabled; + int num_started_vdevs; + + /* Protected by conf-mutex */ + u8 cfg_tx_chainmask; + u8 cfg_rx_chainmask; - struct wmi_pdev_set_wmm_params_arg wmm_params; struct completion install_key_done; + int last_wmi_vdev_start_status; struct completion vdev_setup_done; + struct completion vdev_delete_done; + struct completion peer_stats_info_complete; struct workqueue_struct *workqueue; - + /* Auxiliary workqueue */ + struct workqueue_struct *workqueue_aux; + struct workqueue_struct *workqueue_tx_complete; /* prevents concurrent FW reconfiguration */ struct mutex conf_mutex; + /* protects coredump data */ + struct mutex dump_mutex; + /* protects shared structure data */ spinlock_t data_lock; + /* serialize wake_tx_queue calls per ac */ + spinlock_t queue_lock[IEEE80211_NUM_ACS]; + + struct list_head arvifs; struct list_head peers; + struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS]; wait_queue_head_t peer_mapping_wq; + /* protected by conf_mutex */ + int num_peers; + int num_stations; + + int max_num_peers; + int max_num_stations; + int max_num_vdevs; + int max_num_tdls_vdevs; + int num_active_peers; + int num_tids; + + struct work_struct svc_rdy_work; + struct sk_buff *svc_rdy_skb; + struct work_struct offchan_tx_work; struct sk_buff_head offchan_tx_queue; struct completion offchan_tx_completed; struct sk_buff *offchan_tx_skb; + struct work_struct wmi_mgmt_tx_work; + struct sk_buff_head wmi_mgmt_tx_queue; + + enum ath10k_state state; + + struct work_struct register_work; + struct work_struct restart_work; + struct work_struct recovery_check_work; + struct work_struct bundle_tx_work; + 
struct work_struct tx_complete_work; + + atomic_t pending_recovery; + unsigned int recovery_count; + /* continuous recovery fail count */ + atomic_t fail_cont_count; + + /* cycle count is reported twice for each visited channel during scan. + * access protected by data_lock + */ + u32 survey_last_rx_clear_count; + u32 survey_last_cycle_count; + struct survey_info survey[ATH10K_NUM_CHANS]; + + /* Channel info events are expected to come in pairs without and with + * COMPLETE flag set respectively for each channel visit during scan. + * + * However there are deviations from this rule. This flag is used to + * avoid reporting garbage data. + */ + bool ch_info_can_report_survey; + struct completion bss_survey_done; + + struct dfs_pattern_detector *dfs_detector; + + unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */ + #ifdef CONFIG_ATH10K_DEBUGFS struct ath10k_debug debug; + struct { + /* relay(fs) channel for spectral scan */ + struct rchan *rfs_chan_spec_scan; + + /* spectral_mode and spec_config are protected by conf_mutex */ + enum ath10k_spectral_mode mode; + struct ath10k_spec_scan config; + } spectral; #endif + + u32 pktlog_filter; + +#ifdef CONFIG_DEV_COREDUMP + struct { + struct ath10k_fw_crash_data *fw_crash_data; + } coredump; +#endif + + struct { + /* protected by conf_mutex */ + struct ath10k_fw_components utf_mode_fw; + u8 ftm_msgref; + + /* protected by data_lock */ + bool utf_monitor; + u32 data_pos; + u32 expected_seq; + u8 *eventdata; + } testmode; + + struct { + struct gpio_led wifi_led; + struct led_classdev cdev; + char label[48]; + u32 gpio_state_pin; + } leds; + + struct { + /* protected by data_lock */ + u32 rx_crc_err_drop; + u32 fw_crash_counter; + u32 fw_warm_reset_counter; + u32 fw_cold_reset_counter; + } stats; + + struct ath10k_thermal thermal; + struct ath10k_wow wow; + struct ath10k_per_peer_tx_stats peer_tx_stats; + + /* NAPI */ + struct net_device *napi_dev; + struct napi_struct napi; + + struct work_struct set_coverage_class_work; + /* protected by conf_mutex */ + struct { + /* writing also protected by data_lock */ + s16 coverage_class; + + u32 reg_phyclk; + u32 reg_slottime_conf; + u32 reg_slottime_orig; + u32 reg_ack_cts_timeout_conf; + u32 reg_ack_cts_timeout_orig; + } fw_coverage; + + u32 ampdu_reference; + + const u8 *wmi_key_cipher; + void *ce_priv; + + u32 sta_tid_stats_mask; + + /* protected by data_lock */ + enum ath10k_radar_confirmation_state radar_conf_state; + struct ath10k_radar_found_info last_radar_info; + struct work_struct radar_confirmation_work; + struct ath10k_bus_params bus_param; + struct completion peer_delete_done; + + bool coex_support; + int coex_gpio_pin; + + s32 tx_power_2g_limit; + s32 tx_power_5g_limit; + + /* must be last */ + u8 drv_priv[] __aligned(sizeof(void *)); }; -struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, +static inline bool ath10k_peer_stats_enabled(struct ath10k *ar) +{ + if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) && + test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) + return true; + + return false; +} + +extern unsigned int ath10k_frame_mode; +extern unsigned long ath10k_coredump_mask; + +void ath10k_core_napi_sync_disable(struct ath10k *ar); +void ath10k_core_napi_enable(struct ath10k *ar); +struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, enum ath10k_bus bus, + enum ath10k_hw_rev hw_rev, const struct ath10k_hif_ops *hif_ops); void ath10k_core_destroy(struct ath10k *ar); - -int ath10k_core_register(struct ath10k *ar); +void 
ath10k_core_get_fw_features_str(struct ath10k *ar, + char *buf, + size_t max_len); +int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, + struct ath10k_fw_file *fw_file); + +int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, + const struct ath10k_fw_components *fw_components); +int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt); +void ath10k_core_stop(struct ath10k *ar); +void ath10k_core_start_recovery(struct ath10k *ar); +int ath10k_core_register(struct ath10k *ar, + const struct ath10k_bus_params *bus_params); void ath10k_core_unregister(struct ath10k *ar); - -int ath10k_core_target_suspend(struct ath10k *ar); -int ath10k_core_target_resume(struct ath10k *ar); +int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type); +int ath10k_core_check_dt(struct ath10k *ar); +void ath10k_core_free_board_files(struct ath10k *ar); #endif /* _CORE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c new file mode 100644 index 000000000000..50d0c4213ecf --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -0,0 +1,1666 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#include "coredump.h" + +#include <linux/devcoredump.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/utsname.h> + +#include "debug.h" +#include "hw.h" + +static const struct ath10k_mem_section qca6174_hw21_register_sections[] = { + {0x800, 0x810}, + {0x820, 0x82C}, + {0x830, 0x8F4}, + {0x90C, 0x91C}, + {0xA14, 0xA18}, + {0xA84, 0xA94}, + {0xAA8, 0xAD4}, + {0xADC, 0xB40}, + {0x1000, 0x10A4}, + {0x10BC, 0x111C}, + {0x1134, 0x1138}, + {0x1144, 0x114C}, + {0x1150, 0x115C}, + {0x1160, 0x1178}, + {0x1240, 0x1260}, + {0x2000, 0x207C}, + {0x3000, 0x3014}, + {0x4000, 0x4014}, + {0x5000, 0x5124}, + {0x6000, 0x6040}, + {0x6080, 0x60CC}, + {0x6100, 0x611C}, + {0x6140, 0x61D8}, + {0x6200, 0x6238}, + {0x6240, 0x628C}, + {0x62C0, 0x62EC}, + {0x6380, 0x63E8}, + {0x6400, 0x6440}, + {0x6480, 0x64CC}, + {0x6500, 0x651C}, + {0x6540, 0x6580}, + {0x6600, 0x6638}, + {0x6640, 0x668C}, + {0x66C0, 0x66EC}, + {0x6780, 0x67E8}, + {0x7080, 0x708C}, + {0x70C0, 0x70C8}, + {0x7400, 0x741C}, + {0x7440, 0x7454}, + {0x7800, 0x7818}, + {0x8000, 0x8004}, + {0x8010, 0x8064}, + {0x8080, 0x8084}, + {0x80A0, 0x80A4}, + {0x80C0, 0x80C4}, + {0x80E0, 0x80F4}, + {0x8100, 0x8104}, + {0x8110, 0x812C}, + {0x9000, 0x9004}, + {0x9800, 0x982C}, + {0x9830, 0x9838}, + {0x9840, 0x986C}, + {0x9870, 0x9898}, + {0x9A00, 0x9C00}, + {0xD580, 0xD59C}, + {0xF000, 0xF0E0}, + {0xF140, 0xF190}, + {0xF250, 0xF25C}, + {0xF260, 0xF268}, + {0xF26C, 0xF2A8}, + {0x10008, 0x1000C}, + {0x10014, 0x10018}, + {0x1001C, 0x10020}, + {0x10024, 0x10028}, + {0x10030, 0x10034}, + {0x10040, 0x10054}, + {0x10058, 0x1007C}, + {0x10080, 0x100C4}, + {0x100C8, 0x10114}, + {0x1012C, 0x10130}, + {0x10138, 0x10144}, + {0x10200, 0x10220}, + {0x10230, 0x10250}, + {0x10260, 0x10280}, + {0x10290, 0x102B0}, + {0x102C0, 0x102DC}, + {0x102E0, 0x102F4}, + {0x102FC, 0x1037C}, + {0x10380, 0x10390}, + {0x10800, 0x10828}, + {0x10840, 0x10844}, + {0x10880, 0x10884}, + {0x108C0, 0x108E8}, + {0x10900, 0x10928}, + {0x10940, 0x10944}, + {0x10980, 0x10984}, + {0x109C0, 0x109E8}, + {0x10A00, 0x10A28}, + 
{0x10A40, 0x10A50}, + {0x11000, 0x11028}, + {0x11030, 0x11034}, + {0x11038, 0x11068}, + {0x11070, 0x11074}, + {0x11078, 0x110A8}, + {0x110B0, 0x110B4}, + {0x110B8, 0x110E8}, + {0x110F0, 0x110F4}, + {0x110F8, 0x11128}, + {0x11138, 0x11144}, + {0x11178, 0x11180}, + {0x111B8, 0x111C0}, + {0x111F8, 0x11200}, + {0x11238, 0x1123C}, + {0x11270, 0x11274}, + {0x11278, 0x1127C}, + {0x112B0, 0x112B4}, + {0x112B8, 0x112BC}, + {0x112F0, 0x112F4}, + {0x112F8, 0x112FC}, + {0x11338, 0x1133C}, + {0x11378, 0x1137C}, + {0x113B8, 0x113BC}, + {0x113F8, 0x113FC}, + {0x11438, 0x11440}, + {0x11478, 0x11480}, + {0x114B8, 0x114BC}, + {0x114F8, 0x114FC}, + {0x11538, 0x1153C}, + {0x11578, 0x1157C}, + {0x115B8, 0x115BC}, + {0x115F8, 0x115FC}, + {0x11638, 0x1163C}, + {0x11678, 0x1167C}, + {0x116B8, 0x116BC}, + {0x116F8, 0x116FC}, + {0x11738, 0x1173C}, + {0x11778, 0x1177C}, + {0x117B8, 0x117BC}, + {0x117F8, 0x117FC}, + {0x17000, 0x1701C}, + {0x17020, 0x170AC}, + {0x18000, 0x18050}, + {0x18054, 0x18074}, + {0x18080, 0x180D4}, + {0x180DC, 0x18104}, + {0x18108, 0x1813C}, + {0x18144, 0x18148}, + {0x18168, 0x18174}, + {0x18178, 0x18180}, + {0x181C8, 0x181E0}, + {0x181E4, 0x181E8}, + {0x181EC, 0x1820C}, + {0x1825C, 0x18280}, + {0x18284, 0x18290}, + {0x18294, 0x182A0}, + {0x18300, 0x18304}, + {0x18314, 0x18320}, + {0x18328, 0x18350}, + {0x1835C, 0x1836C}, + {0x18370, 0x18390}, + {0x18398, 0x183AC}, + {0x183BC, 0x183D8}, + {0x183DC, 0x183F4}, + {0x18400, 0x186F4}, + {0x186F8, 0x1871C}, + {0x18720, 0x18790}, + {0x19800, 0x19830}, + {0x19834, 0x19840}, + {0x19880, 0x1989C}, + {0x198A4, 0x198B0}, + {0x198BC, 0x19900}, + {0x19C00, 0x19C88}, + {0x19D00, 0x19D20}, + {0x19E00, 0x19E7C}, + {0x19E80, 0x19E94}, + {0x19E98, 0x19EAC}, + {0x19EB0, 0x19EBC}, + {0x19F70, 0x19F74}, + {0x19F80, 0x19F8C}, + {0x19FA0, 0x19FB4}, + {0x19FC0, 0x19FD8}, + {0x1A000, 0x1A200}, + {0x1A204, 0x1A210}, + {0x1A228, 0x1A22C}, + {0x1A230, 0x1A248}, + {0x1A250, 0x1A270}, + {0x1A280, 0x1A290}, + {0x1A2A0, 0x1A2A4}, + {0x1A2C0, 0x1A2EC}, + {0x1A300, 0x1A3BC}, + {0x1A3F0, 0x1A3F4}, + {0x1A3F8, 0x1A434}, + {0x1A438, 0x1A444}, + {0x1A448, 0x1A468}, + {0x1A580, 0x1A58C}, + {0x1A644, 0x1A654}, + {0x1A670, 0x1A698}, + {0x1A6AC, 0x1A6B0}, + {0x1A6D0, 0x1A6D4}, + {0x1A6EC, 0x1A70C}, + {0x1A710, 0x1A738}, + {0x1A7C0, 0x1A7D0}, + {0x1A7D4, 0x1A7D8}, + {0x1A7DC, 0x1A7E4}, + {0x1A7F0, 0x1A7F8}, + {0x1A888, 0x1A89C}, + {0x1A8A8, 0x1A8AC}, + {0x1A8C0, 0x1A8DC}, + {0x1A8F0, 0x1A8FC}, + {0x1AE04, 0x1AE08}, + {0x1AE18, 0x1AE24}, + {0x1AF80, 0x1AF8C}, + {0x1AFA0, 0x1AFB4}, + {0x1B000, 0x1B200}, + {0x1B284, 0x1B288}, + {0x1B2D0, 0x1B2D8}, + {0x1B2DC, 0x1B2EC}, + {0x1B300, 0x1B340}, + {0x1B374, 0x1B378}, + {0x1B380, 0x1B384}, + {0x1B388, 0x1B38C}, + {0x1B404, 0x1B408}, + {0x1B420, 0x1B428}, + {0x1B440, 0x1B444}, + {0x1B448, 0x1B44C}, + {0x1B450, 0x1B458}, + {0x1B45C, 0x1B468}, + {0x1B584, 0x1B58C}, + {0x1B68C, 0x1B690}, + {0x1B6AC, 0x1B6B0}, + {0x1B7F0, 0x1B7F8}, + {0x1C800, 0x1CC00}, + {0x1CE00, 0x1CE04}, + {0x1CF80, 0x1CF84}, + {0x1D200, 0x1D800}, + {0x1E000, 0x20014}, + {0x20100, 0x20124}, + {0x21400, 0x217A8}, + {0x21800, 0x21BA8}, + {0x21C00, 0x21FA8}, + {0x22000, 0x223A8}, + {0x22400, 0x227A8}, + {0x22800, 0x22BA8}, + {0x22C00, 0x22FA8}, + {0x23000, 0x233A8}, + {0x24000, 0x24034}, + {0x26000, 0x26064}, + {0x27000, 0x27024}, + {0x34000, 0x3400C}, + {0x34400, 0x3445C}, + {0x34800, 0x3485C}, + {0x34C00, 0x34C5C}, + {0x35000, 0x3505C}, + {0x35400, 0x3545C}, + {0x35800, 0x3585C}, + {0x35C00, 0x35C5C}, + {0x36000, 0x3605C}, + {0x38000, 0x38064}, + {0x38070, 0x380E0}, + {0x3A000, 
0x3A064}, + {0x40000, 0x400A4}, + {0x80000, 0x8000C}, + {0x80010, 0x80020}, +}; + +static const struct ath10k_mem_section qca6174_hw30_sdio_register_sections[] = { + {0x800, 0x810}, + {0x820, 0x82C}, + {0x830, 0x8F4}, + {0x90C, 0x91C}, + {0xA14, 0xA18}, + {0xA84, 0xA94}, + {0xAA8, 0xAD4}, + {0xADC, 0xB40}, + {0x1000, 0x10A4}, + {0x10BC, 0x111C}, + {0x1134, 0x1138}, + {0x1144, 0x114C}, + {0x1150, 0x115C}, + {0x1160, 0x1178}, + {0x1240, 0x1260}, + {0x2000, 0x207C}, + {0x3000, 0x3014}, + {0x4000, 0x4014}, + {0x5000, 0x5124}, + {0x6000, 0x6040}, + {0x6080, 0x60CC}, + {0x6100, 0x611C}, + {0x6140, 0x61D8}, + {0x6200, 0x6238}, + {0x6240, 0x628C}, + {0x62C0, 0x62EC}, + {0x6380, 0x63E8}, + {0x6400, 0x6440}, + {0x6480, 0x64CC}, + {0x6500, 0x651C}, + {0x6540, 0x6580}, + {0x6600, 0x6638}, + {0x6640, 0x668C}, + {0x66C0, 0x66EC}, + {0x6780, 0x67E8}, + {0x7080, 0x708C}, + {0x70C0, 0x70C8}, + {0x7400, 0x741C}, + {0x7440, 0x7454}, + {0x7800, 0x7818}, + {0x8010, 0x8060}, + {0x8080, 0x8084}, + {0x80A0, 0x80A4}, + {0x80C0, 0x80C4}, + {0x80E0, 0x80ec}, + {0x8110, 0x8128}, + {0x9000, 0x9004}, + {0xF000, 0xF0E0}, + {0xF140, 0xF190}, + {0xF250, 0xF25C}, + {0xF260, 0xF268}, + {0xF26C, 0xF2A8}, + {0x10008, 0x1000C}, + {0x10014, 0x10018}, + {0x1001C, 0x10020}, + {0x10024, 0x10028}, + {0x10030, 0x10034}, + {0x10040, 0x10054}, + {0x10058, 0x1007C}, + {0x10080, 0x100C4}, + {0x100C8, 0x10114}, + {0x1012C, 0x10130}, + {0x10138, 0x10144}, + {0x10200, 0x10220}, + {0x10230, 0x10250}, + {0x10260, 0x10280}, + {0x10290, 0x102B0}, + {0x102C0, 0x102DC}, + {0x102E0, 0x102F4}, + {0x102FC, 0x1037C}, + {0x10380, 0x10390}, + {0x10800, 0x10828}, + {0x10840, 0x10844}, + {0x10880, 0x10884}, + {0x108C0, 0x108E8}, + {0x10900, 0x10928}, + {0x10940, 0x10944}, + {0x10980, 0x10984}, + {0x109C0, 0x109E8}, + {0x10A00, 0x10A28}, + {0x10A40, 0x10A50}, + {0x11000, 0x11028}, + {0x11030, 0x11034}, + {0x11038, 0x11068}, + {0x11070, 0x11074}, + {0x11078, 0x110A8}, + {0x110B0, 0x110B4}, + {0x110B8, 0x110E8}, + {0x110F0, 0x110F4}, + {0x110F8, 0x11128}, + {0x11138, 0x11144}, + {0x11178, 0x11180}, + {0x111B8, 0x111C0}, + {0x111F8, 0x11200}, + {0x11238, 0x1123C}, + {0x11270, 0x11274}, + {0x11278, 0x1127C}, + {0x112B0, 0x112B4}, + {0x112B8, 0x112BC}, + {0x112F0, 0x112F4}, + {0x112F8, 0x112FC}, + {0x11338, 0x1133C}, + {0x11378, 0x1137C}, + {0x113B8, 0x113BC}, + {0x113F8, 0x113FC}, + {0x11438, 0x11440}, + {0x11478, 0x11480}, + {0x114B8, 0x114BC}, + {0x114F8, 0x114FC}, + {0x11538, 0x1153C}, + {0x11578, 0x1157C}, + {0x115B8, 0x115BC}, + {0x115F8, 0x115FC}, + {0x11638, 0x1163C}, + {0x11678, 0x1167C}, + {0x116B8, 0x116BC}, + {0x116F8, 0x116FC}, + {0x11738, 0x1173C}, + {0x11778, 0x1177C}, + {0x117B8, 0x117BC}, + {0x117F8, 0x117FC}, + {0x17000, 0x1701C}, + {0x17020, 0x170AC}, + {0x18000, 0x18050}, + {0x18054, 0x18074}, + {0x18080, 0x180D4}, + {0x180DC, 0x18104}, + {0x18108, 0x1813C}, + {0x18144, 0x18148}, + {0x18168, 0x18174}, + {0x18178, 0x18180}, + {0x181C8, 0x181E0}, + {0x181E4, 0x181E8}, + {0x181EC, 0x1820C}, + {0x1825C, 0x18280}, + {0x18284, 0x18290}, + {0x18294, 0x182A0}, + {0x18300, 0x18304}, + {0x18314, 0x18320}, + {0x18328, 0x18350}, + {0x1835C, 0x1836C}, + {0x18370, 0x18390}, + {0x18398, 0x183AC}, + {0x183BC, 0x183D8}, + {0x183DC, 0x183F4}, + {0x18400, 0x186F4}, + {0x186F8, 0x1871C}, + {0x18720, 0x18790}, + {0x19800, 0x19830}, + {0x19834, 0x19840}, + {0x19880, 0x1989C}, + {0x198A4, 0x198B0}, + {0x198BC, 0x19900}, + {0x19C00, 0x19C88}, + {0x19D00, 0x19D20}, + {0x19E00, 0x19E7C}, + {0x19E80, 0x19E94}, + {0x19E98, 0x19EAC}, + {0x19EB0, 0x19EBC}, + {0x19F70, 
0x19F74},
+	{0x19F80, 0x19F8C},
+	{0x19FA0, 0x19FB4},
+	{0x19FC0, 0x19FD8},
+	{0x1A000, 0x1A200},
+	{0x1A204, 0x1A210},
+	{0x1A228, 0x1A22C},
+	{0x1A230, 0x1A248},
+	{0x1A250, 0x1A270},
+	{0x1A280, 0x1A290},
+	{0x1A2A0, 0x1A2A4},
+	{0x1A2C0, 0x1A2EC},
+	{0x1A300, 0x1A3BC},
+	{0x1A3F0, 0x1A3F4},
+	{0x1A3F8, 0x1A434},
+	{0x1A438, 0x1A444},
+	{0x1A448, 0x1A468},
+	{0x1A580, 0x1A58C},
+	{0x1A644, 0x1A654},
+	{0x1A670, 0x1A698},
+	{0x1A6AC, 0x1A6B0},
+	{0x1A6D0, 0x1A6D4},
+	{0x1A6EC, 0x1A70C},
+	{0x1A710, 0x1A738},
+	{0x1A7C0, 0x1A7D0},
+	{0x1A7D4, 0x1A7D8},
+	{0x1A7DC, 0x1A7E4},
+	{0x1A7F0, 0x1A7F8},
+	{0x1A888, 0x1A89C},
+	{0x1A8A8, 0x1A8AC},
+	{0x1A8C0, 0x1A8DC},
+	{0x1A8F0, 0x1A8FC},
+	{0x1AE04, 0x1AE08},
+	{0x1AE18, 0x1AE24},
+	{0x1AF80, 0x1AF8C},
+	{0x1AFA0, 0x1AFB4},
+	{0x1B000, 0x1B200},
+	{0x1B284, 0x1B288},
+	{0x1B2D0, 0x1B2D8},
+	{0x1B2DC, 0x1B2EC},
+	{0x1B300, 0x1B340},
+	{0x1B374, 0x1B378},
+	{0x1B380, 0x1B384},
+	{0x1B388, 0x1B38C},
+	{0x1B404, 0x1B408},
+	{0x1B420, 0x1B428},
+	{0x1B440, 0x1B444},
+	{0x1B448, 0x1B44C},
+	{0x1B450, 0x1B458},
+	{0x1B45C, 0x1B468},
+	{0x1B584, 0x1B58C},
+	{0x1B68C, 0x1B690},
+	{0x1B6AC, 0x1B6B0},
+	{0x1B7F0, 0x1B7F8},
+	{0x1C800, 0x1CC00},
+	{0x1CE00, 0x1CE04},
+	{0x1CF80, 0x1CF84},
+	{0x1D200, 0x1D800},
+	{0x1E000, 0x20014},
+	{0x20100, 0x20124},
+	{0x21400, 0x217A8},
+	{0x21800, 0x21BA8},
+	{0x21C00, 0x21FA8},
+	{0x22000, 0x223A8},
+	{0x22400, 0x227A8},
+	{0x22800, 0x22BA8},
+	{0x22C00, 0x22FA8},
+	{0x23000, 0x233A8},
+	{0x24000, 0x24034},
+
+	/* EFUSE0,1,2 are disabled here
+	 * because their state may be reset
+	 *
+	 * {0x24800, 0x24804},
+	 * {0x25000, 0x25004},
+	 * {0x25800, 0x25804},
+	 */
+
+	{0x26000, 0x26064},
+	{0x27000, 0x27024},
+	{0x34000, 0x3400C},
+	{0x34400, 0x3445C},
+	{0x34800, 0x3485C},
+	{0x34C00, 0x34C5C},
+	{0x35000, 0x3505C},
+	{0x35400, 0x3545C},
+	{0x35800, 0x3585C},
+	{0x35C00, 0x35C5C},
+	{0x36000, 0x3605C},
+	{0x38000, 0x38064},
+	{0x38070, 0x380E0},
+	{0x3A000, 0x3A074},
+
+	/* The DBI window is skipped here; it can only be accessed when PCIe
+	 * is active (not in reset) and CORE_CTRL_PCIE_LTSSM_EN = 0 &&
+	 * PCIE_CTRL_APP_LTSSM_ENABLE = 0.
+	 * {0x3C000, 0x3C004},
+	 */
+
+	{0x40000, 0x400A4},
+
+	/* SI register is skipped here.
+ * Because it will cause bus hang + * + * {0x50000, 0x50018}, + */ + + {0x80000, 0x8000C}, + {0x80010, 0x80020}, +}; + +static const struct ath10k_mem_section qca6174_hw30_register_sections[] = { + {0x800, 0x810}, + {0x820, 0x82C}, + {0x830, 0x8F4}, + {0x90C, 0x91C}, + {0xA14, 0xA18}, + {0xA84, 0xA94}, + {0xAA8, 0xAD4}, + {0xADC, 0xB40}, + {0x1000, 0x10A4}, + {0x10BC, 0x111C}, + {0x1134, 0x1138}, + {0x1144, 0x114C}, + {0x1150, 0x115C}, + {0x1160, 0x1178}, + {0x1240, 0x1260}, + {0x2000, 0x207C}, + {0x3000, 0x3014}, + {0x4000, 0x4014}, + {0x5000, 0x5124}, + {0x6000, 0x6040}, + {0x6080, 0x60CC}, + {0x6100, 0x611C}, + {0x6140, 0x61D8}, + {0x6200, 0x6238}, + {0x6240, 0x628C}, + {0x62C0, 0x62EC}, + {0x6380, 0x63E8}, + {0x6400, 0x6440}, + {0x6480, 0x64CC}, + {0x6500, 0x651C}, + {0x6540, 0x6580}, + {0x6600, 0x6638}, + {0x6640, 0x668C}, + {0x66C0, 0x66EC}, + {0x6780, 0x67E8}, + {0x7080, 0x708C}, + {0x70C0, 0x70C8}, + {0x7400, 0x741C}, + {0x7440, 0x7454}, + {0x7800, 0x7818}, + {0x8000, 0x8004}, + {0x8010, 0x8064}, + {0x8080, 0x8084}, + {0x80A0, 0x80A4}, + {0x80C0, 0x80C4}, + {0x80E0, 0x80F4}, + {0x8100, 0x8104}, + {0x8110, 0x812C}, + {0x9000, 0x9004}, + {0x9800, 0x982C}, + {0x9830, 0x9838}, + {0x9840, 0x986C}, + {0x9870, 0x9898}, + {0x9A00, 0x9C00}, + {0xD580, 0xD59C}, + {0xF000, 0xF0E0}, + {0xF140, 0xF190}, + {0xF250, 0xF25C}, + {0xF260, 0xF268}, + {0xF26C, 0xF2A8}, + {0x10008, 0x1000C}, + {0x10014, 0x10018}, + {0x1001C, 0x10020}, + {0x10024, 0x10028}, + {0x10030, 0x10034}, + {0x10040, 0x10054}, + {0x10058, 0x1007C}, + {0x10080, 0x100C4}, + {0x100C8, 0x10114}, + {0x1012C, 0x10130}, + {0x10138, 0x10144}, + {0x10200, 0x10220}, + {0x10230, 0x10250}, + {0x10260, 0x10280}, + {0x10290, 0x102B0}, + {0x102C0, 0x102DC}, + {0x102E0, 0x102F4}, + {0x102FC, 0x1037C}, + {0x10380, 0x10390}, + {0x10800, 0x10828}, + {0x10840, 0x10844}, + {0x10880, 0x10884}, + {0x108C0, 0x108E8}, + {0x10900, 0x10928}, + {0x10940, 0x10944}, + {0x10980, 0x10984}, + {0x109C0, 0x109E8}, + {0x10A00, 0x10A28}, + {0x10A40, 0x10A50}, + {0x11000, 0x11028}, + {0x11030, 0x11034}, + {0x11038, 0x11068}, + {0x11070, 0x11074}, + {0x11078, 0x110A8}, + {0x110B0, 0x110B4}, + {0x110B8, 0x110E8}, + {0x110F0, 0x110F4}, + {0x110F8, 0x11128}, + {0x11138, 0x11144}, + {0x11178, 0x11180}, + {0x111B8, 0x111C0}, + {0x111F8, 0x11200}, + {0x11238, 0x1123C}, + {0x11270, 0x11274}, + {0x11278, 0x1127C}, + {0x112B0, 0x112B4}, + {0x112B8, 0x112BC}, + {0x112F0, 0x112F4}, + {0x112F8, 0x112FC}, + {0x11338, 0x1133C}, + {0x11378, 0x1137C}, + {0x113B8, 0x113BC}, + {0x113F8, 0x113FC}, + {0x11438, 0x11440}, + {0x11478, 0x11480}, + {0x114B8, 0x114BC}, + {0x114F8, 0x114FC}, + {0x11538, 0x1153C}, + {0x11578, 0x1157C}, + {0x115B8, 0x115BC}, + {0x115F8, 0x115FC}, + {0x11638, 0x1163C}, + {0x11678, 0x1167C}, + {0x116B8, 0x116BC}, + {0x116F8, 0x116FC}, + {0x11738, 0x1173C}, + {0x11778, 0x1177C}, + {0x117B8, 0x117BC}, + {0x117F8, 0x117FC}, + {0x17000, 0x1701C}, + {0x17020, 0x170AC}, + {0x18000, 0x18050}, + {0x18054, 0x18074}, + {0x18080, 0x180D4}, + {0x180DC, 0x18104}, + {0x18108, 0x1813C}, + {0x18144, 0x18148}, + {0x18168, 0x18174}, + {0x18178, 0x18180}, + {0x181C8, 0x181E0}, + {0x181E4, 0x181E8}, + {0x181EC, 0x1820C}, + {0x1825C, 0x18280}, + {0x18284, 0x18290}, + {0x18294, 0x182A0}, + {0x18300, 0x18304}, + {0x18314, 0x18320}, + {0x18328, 0x18350}, + {0x1835C, 0x1836C}, + {0x18370, 0x18390}, + {0x18398, 0x183AC}, + {0x183BC, 0x183D8}, + {0x183DC, 0x183F4}, + {0x18400, 0x186F4}, + {0x186F8, 0x1871C}, + {0x18720, 0x18790}, + {0x19800, 0x19830}, + {0x19834, 0x19840}, + {0x19880, 
0x1989C}, + {0x198A4, 0x198B0}, + {0x198BC, 0x19900}, + {0x19C00, 0x19C88}, + {0x19D00, 0x19D20}, + {0x19E00, 0x19E7C}, + {0x19E80, 0x19E94}, + {0x19E98, 0x19EAC}, + {0x19EB0, 0x19EBC}, + {0x19F70, 0x19F74}, + {0x19F80, 0x19F8C}, + {0x19FA0, 0x19FB4}, + {0x19FC0, 0x19FD8}, + {0x1A000, 0x1A200}, + {0x1A204, 0x1A210}, + {0x1A228, 0x1A22C}, + {0x1A230, 0x1A248}, + {0x1A250, 0x1A270}, + {0x1A280, 0x1A290}, + {0x1A2A0, 0x1A2A4}, + {0x1A2C0, 0x1A2EC}, + {0x1A300, 0x1A3BC}, + {0x1A3F0, 0x1A3F4}, + {0x1A3F8, 0x1A434}, + {0x1A438, 0x1A444}, + {0x1A448, 0x1A468}, + {0x1A580, 0x1A58C}, + {0x1A644, 0x1A654}, + {0x1A670, 0x1A698}, + {0x1A6AC, 0x1A6B0}, + {0x1A6D0, 0x1A6D4}, + {0x1A6EC, 0x1A70C}, + {0x1A710, 0x1A738}, + {0x1A7C0, 0x1A7D0}, + {0x1A7D4, 0x1A7D8}, + {0x1A7DC, 0x1A7E4}, + {0x1A7F0, 0x1A7F8}, + {0x1A888, 0x1A89C}, + {0x1A8A8, 0x1A8AC}, + {0x1A8C0, 0x1A8DC}, + {0x1A8F0, 0x1A8FC}, + {0x1AE04, 0x1AE08}, + {0x1AE18, 0x1AE24}, + {0x1AF80, 0x1AF8C}, + {0x1AFA0, 0x1AFB4}, + {0x1B000, 0x1B200}, + {0x1B284, 0x1B288}, + {0x1B2D0, 0x1B2D8}, + {0x1B2DC, 0x1B2EC}, + {0x1B300, 0x1B340}, + {0x1B374, 0x1B378}, + {0x1B380, 0x1B384}, + {0x1B388, 0x1B38C}, + {0x1B404, 0x1B408}, + {0x1B420, 0x1B428}, + {0x1B440, 0x1B444}, + {0x1B448, 0x1B44C}, + {0x1B450, 0x1B458}, + {0x1B45C, 0x1B468}, + {0x1B584, 0x1B58C}, + {0x1B68C, 0x1B690}, + {0x1B6AC, 0x1B6B0}, + {0x1B7F0, 0x1B7F8}, + {0x1C800, 0x1CC00}, + {0x1CE00, 0x1CE04}, + {0x1CF80, 0x1CF84}, + {0x1D200, 0x1D800}, + {0x1E000, 0x20014}, + {0x20100, 0x20124}, + {0x21400, 0x217A8}, + {0x21800, 0x21BA8}, + {0x21C00, 0x21FA8}, + {0x22000, 0x223A8}, + {0x22400, 0x227A8}, + {0x22800, 0x22BA8}, + {0x22C00, 0x22FA8}, + {0x23000, 0x233A8}, + {0x24000, 0x24034}, + {0x26000, 0x26064}, + {0x27000, 0x27024}, + {0x34000, 0x3400C}, + {0x34400, 0x3445C}, + {0x34800, 0x3485C}, + {0x34C00, 0x34C5C}, + {0x35000, 0x3505C}, + {0x35400, 0x3545C}, + {0x35800, 0x3585C}, + {0x35C00, 0x35C5C}, + {0x36000, 0x3605C}, + {0x38000, 0x38064}, + {0x38070, 0x380E0}, + {0x3A000, 0x3A074}, + {0x40000, 0x400A4}, + {0x80000, 0x8000C}, + {0x80010, 0x80020}, +}; + +static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x70000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + + /* RTC_SOC_BASE_ADDRESS */ + .start = 0x0, + + /* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */ + .len = 0x800 - 0x0, + + .name = "REG_PART1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + + /* STEREO_BASE_ADDRESS */ + .start = 0x27000, + + /* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */ + .len = 0x60000 - 0x27000, + + .name = "REG_PART2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + +static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x70000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_AXI, + .start = 0xa0000, + .len = 0x18000, + .name = "AXI", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x800, + .len = 0x80020 - 0x800, + .name = "REG_TOTAL", + .section_table = { + .sections = qca6174_hw21_register_sections, + .size = ARRAY_SIZE(qca6174_hw21_register_sections), + }, + }, +}; + +static const struct ath10k_mem_region qca6174_hw30_sdio_mem_regions[] = { 
+ { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0xa8000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_AXI, + .start = 0xa0000, + .len = 0x18000, + .name = "AXI", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IRAM1, + .start = 0x00980000, + .len = 0x00080000, + .name = "IRAM1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IRAM2, + .start = 0x00a00000, + .len = 0x00040000, + .name = "IRAM2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x800, + .len = 0x80020 - 0x800, + .name = "REG_TOTAL", + .section_table = { + .sections = qca6174_hw30_sdio_register_sections, + .size = ARRAY_SIZE(qca6174_hw30_sdio_register_sections), + }, + }, +}; + +static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0xa8000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_AXI, + .start = 0xa0000, + .len = 0x18000, + .name = "AXI", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x800, + .len = 0x80020 - 0x800, + .name = "REG_TOTAL", + .section_table = { + .sections = qca6174_hw30_register_sections, + .size = ARRAY_SIZE(qca6174_hw30_register_sections), + }, + }, + + /* IRAM dump must be put last */ + { + .type = ATH10K_MEM_REGION_TYPE_IRAM1, + .start = 0x00980000, + .len = 0x00080000, + .name = "IRAM1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IRAM2, + .start = 0x00a00000, + .len = 0x00040000, + .name = "IRAM2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + +static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x50000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x4000, + .len = 0x2000, + .name = "REG_PART1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x8000, + .len = 0x58000, + .name = "REG_PART2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + +static const struct ath10k_mem_region qca99x0_hw20_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x60000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x980000, + .len = 0x50000, + .name = "IRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOSRAM, + .start = 0xC0000, + .len = 0x40000, + .name = "SRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x30000, + .len = 0x7000, + .name = "APB REG 1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x3f000, + .len = 0x3000, + .name = "APB REG 2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x43000, + .len = 0x3000, + .name = "WIFI REG", + .section_table = { + .sections = 
NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x4A000, + .len = 0x5000, + .name = "CE REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x80000, + .len = 0x6000, + .name = "SOC REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + +static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x80000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x980000, + .len = 0x50000, + .name = "IRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOSRAM, + .start = 0xC0000, + .len = 0x40000, + .name = "SRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x30000, + .len = 0x7000, + .name = "APB REG 1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x3f000, + .len = 0x3000, + .name = "APB REG 2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x43000, + .len = 0x3000, + .name = "WIFI REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x4A000, + .len = 0x5000, + .name = "CE REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x80000, + .len = 0x6000, + .name = "SOC REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + +static const struct ath10k_mem_section ipq4019_soc_reg_range[] = { + {0x080000, 0x080004}, + {0x080020, 0x080024}, + {0x080028, 0x080050}, + {0x0800d4, 0x0800ec}, + {0x08010c, 0x080118}, + {0x080284, 0x080290}, + {0x0802a8, 0x0802b8}, + {0x0802dc, 0x08030c}, + {0x082000, 0x083fff} +}; + +static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x68000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0xC0000, + .len = 0x40000, + .name = "SRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x980000, + .len = 0x50000, + .name = "IRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x30000, + .len = 0x7000, + .name = "APB REG 1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x3f000, + .len = 0x3000, + .name = "APB REG 2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x43000, + .len = 0x3000, + .name = "WIFI REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x4A000, + .len = 0x5000, + .name = "CE REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x080000, + .len = 0x083fff - 0x080000, + .name = "REG_TOTAL", + .section_table = { + .sections = ipq4019_soc_reg_range, + .size = ARRAY_SIZE(ipq4019_soc_reg_range), + }, + }, +}; + +static const struct 
ath10k_mem_region wcn399x_hw10_mem_regions[] = { + { + /* MSA region start is not fixed, hence it is assigned at runtime */ + .type = ATH10K_MEM_REGION_TYPE_MSA, + .len = 0x100000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + +static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { + { + .hw_id = QCA6174_HW_1_0_VERSION, + .hw_rev = ATH10K_HW_QCA6174, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca6174_hw10_mem_regions, + .size = ARRAY_SIZE(qca6174_hw10_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_1_1_VERSION, + .hw_rev = ATH10K_HW_QCA6174, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca6174_hw10_mem_regions, + .size = ARRAY_SIZE(qca6174_hw10_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_1_3_VERSION, + .hw_rev = ATH10K_HW_QCA6174, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca6174_hw10_mem_regions, + .size = ARRAY_SIZE(qca6174_hw10_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_2_1_VERSION, + .hw_rev = ATH10K_HW_QCA6174, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca6174_hw21_mem_regions, + .size = ARRAY_SIZE(qca6174_hw21_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_3_0_VERSION, + .hw_rev = ATH10K_HW_QCA6174, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca6174_hw30_mem_regions, + .size = ARRAY_SIZE(qca6174_hw30_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_3_2_VERSION, + .hw_rev = ATH10K_HW_QCA6174, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca6174_hw30_mem_regions, + .size = ARRAY_SIZE(qca6174_hw30_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_3_2_VERSION, + .hw_rev = ATH10K_HW_QCA6174, + .bus = ATH10K_BUS_SDIO, + .region_table = { + .regions = qca6174_hw30_sdio_mem_regions, + .size = ARRAY_SIZE(qca6174_hw30_sdio_mem_regions), + }, + }, + { + .hw_id = QCA9377_HW_1_1_DEV_VERSION, + .hw_rev = ATH10K_HW_QCA9377, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca6174_hw30_mem_regions, + .size = ARRAY_SIZE(qca6174_hw30_mem_regions), + }, + }, + { + .hw_id = QCA988X_HW_2_0_VERSION, + .hw_rev = ATH10K_HW_QCA988X, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca988x_hw20_mem_regions, + .size = ARRAY_SIZE(qca988x_hw20_mem_regions), + }, + }, + { + .hw_id = QCA9984_HW_1_0_DEV_VERSION, + .hw_rev = ATH10K_HW_QCA9984, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca9984_hw10_mem_regions, + .size = ARRAY_SIZE(qca9984_hw10_mem_regions), + }, + }, + { + .hw_id = QCA9888_HW_2_0_DEV_VERSION, + .hw_rev = ATH10K_HW_QCA9888, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca9984_hw10_mem_regions, + .size = ARRAY_SIZE(qca9984_hw10_mem_regions), + }, + }, + { + .hw_id = QCA99X0_HW_2_0_DEV_VERSION, + .hw_rev = ATH10K_HW_QCA99X0, + .bus = ATH10K_BUS_PCI, + .region_table = { + .regions = qca99x0_hw20_mem_regions, + .size = ARRAY_SIZE(qca99x0_hw20_mem_regions), + }, + }, + { + .hw_id = QCA4019_HW_1_0_DEV_VERSION, + .hw_rev = ATH10K_HW_QCA4019, + .bus = ATH10K_BUS_AHB, + .region_table = { + .regions = qca4019_hw10_mem_regions, + .size = ARRAY_SIZE(qca4019_hw10_mem_regions), + }, + }, + { + .hw_id = WCN3990_HW_1_0_DEV_VERSION, + .hw_rev = ATH10K_HW_WCN3990, + .bus = ATH10K_BUS_SNOC, + .region_table = { + .regions = wcn399x_hw10_mem_regions, + .size = ARRAY_SIZE(wcn399x_hw10_mem_regions), + }, + }, +}; + +static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar) +{ + const struct ath10k_hw_mem_layout *hw; + const struct ath10k_mem_region *mem_region; + size_t size = 0; + int i; + + hw = 
ath10k_coredump_get_mem_layout(ar); + + if (!hw) + return 0; + + mem_region = &hw->region_table.regions[0]; + + for (i = 0; i < hw->region_table.size; i++) { + size += mem_region->len; + mem_region++; + } + + /* reserve space for the headers */ + size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr); + + /* make sure it is aligned 16 bytes for debug message print out */ + size = ALIGN(size, 16); + + return size; +} + +const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar) +{ + if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) + return NULL; + + return _ath10k_coredump_get_mem_layout(ar); +} +EXPORT_SYMBOL(ath10k_coredump_get_mem_layout); + +const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar) +{ + int i; + + if (WARN_ON(ar->target_version == 0)) + return NULL; + + for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) { + if (ar->target_version == hw_mem_layouts[i].hw_id && + ar->hw_rev == hw_mem_layouts[i].hw_rev && + hw_mem_layouts[i].bus == ar->hif.bus) + return &hw_mem_layouts[i]; + } + + return NULL; +} + +struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; + + lockdep_assert_held(&ar->dump_mutex); + + if (ath10k_coredump_mask == 0) + /* coredump disabled */ + return NULL; + + guid_gen(&crash_data->guid); + ktime_get_real_ts64(&crash_data->timestamp); + + return crash_data; +} +EXPORT_SYMBOL(ath10k_coredump_new); + +static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; + struct ath10k_ce_crash_hdr *ce_hdr; + struct ath10k_dump_file_data *dump_data; + struct ath10k_tlv_dump_data *dump_tlv; + size_t hdr_len = sizeof(*dump_data); + size_t len, sofar = 0; + unsigned char *buf; + + len = hdr_len; + + if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) + len += sizeof(*dump_tlv) + sizeof(crash_data->registers); + + if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) + len += sizeof(*dump_tlv) + sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0]); + + if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) + len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; + + sofar += hdr_len; + + /* This is going to get big when we start dumping FW RAM and such, + * so go ahead and use vmalloc. 
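+	 *
+	 * As a purely illustrative sketch of what was just sized above:
+	 * with all three dump types enabled in ath10k_coredump_mask the
+	 * buffer ends up laid out as
+	 *
+	 *	ath10k_dump_file_data header
+	 *	ath10k_tlv_dump_data + registers       (REGISTERS)
+	 *	ath10k_tlv_dump_data + ce_hdr/entries  (CE_DATA)
+	 *	ath10k_tlv_dump_data + ramdump buffer  (RAM_DATA)
+	 *
+	 * i.e. exactly the TLV sequence a consumer walks in data[].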
+ */ + buf = vzalloc(len); + if (!buf) + return NULL; + + mutex_lock(&ar->dump_mutex); + + dump_data = (struct ath10k_dump_file_data *)(buf); + strscpy(dump_data->df_magic, "ATH10K-FW-DUMP", + sizeof(dump_data->df_magic)); + dump_data->len = cpu_to_le32(len); + + dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION); + + guid_copy(&dump_data->guid, &crash_data->guid); + dump_data->chip_id = cpu_to_le32(ar->bus_param.chip_id); + dump_data->bus_type = cpu_to_le32(0); + dump_data->target_version = cpu_to_le32(ar->target_version); + dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major); + dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor); + dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release); + dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build); + dump_data->phy_capability = cpu_to_le32(ar->phy_capability); + dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power); + dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power); + dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info); + dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info); + dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains); + + strscpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, + sizeof(dump_data->fw_ver)); + + dump_data->kernel_ver_code = 0; + strscpy(dump_data->kernel_ver, init_utsname()->release, + sizeof(dump_data->kernel_ver)); + + dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec); + dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec); + + if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) { + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS); + dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers)); + memcpy(dump_tlv->tlv_data, &crash_data->registers, + sizeof(crash_data->registers)); + sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers); + } + + if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) { + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA); + dump_tlv->tlv_len = cpu_to_le32(struct_size(ce_hdr, entries, + CE_COUNT)); + ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data); + ce_hdr->ce_count = cpu_to_le32(CE_COUNT); + memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved)); + memcpy(ce_hdr->entries, crash_data->ce_crash_data, + CE_COUNT * sizeof(ce_hdr->entries[0])); + sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0]); + } + + /* Gather ram dump */ + if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) { + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA); + dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len); + if (crash_data->ramdump_buf_len) { + memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf, + crash_data->ramdump_buf_len); + sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; + } + } + + mutex_unlock(&ar->dump_mutex); + + return dump_data; +} + +int ath10k_coredump_submit(struct ath10k *ar) +{ + struct ath10k_dump_file_data *dump; + + if (ath10k_coredump_mask == 0) + /* coredump disabled */ + return 0; + + dump = ath10k_coredump_build(ar); + if (!dump) { + ath10k_warn(ar, "no crash dump data found for devcoredump"); + return -ENODATA; + } + + dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL); + + return 0; +} + +int ath10k_coredump_create(struct ath10k *ar) +{ + 
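+	/* A rough sketch of how these helpers are meant to be sequenced by
+	 * the core code (the actual call sites are outside this file):
+	 *
+	 *	ath10k_coredump_create(ar);      <-- at probe
+	 *	ath10k_coredump_register(ar);    <-- once the mem layout is known
+	 *	    ...
+	 *	ath10k_coredump_new(ar);         <-- on firmware crash
+	 *	ath10k_coredump_submit(ar);      <-- hands the blob to devcoredump
+	 *	    ...
+	 *	ath10k_coredump_unregister(ar);
+	 *	ath10k_coredump_destroy(ar);     <-- at removal
+	 */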
if (ath10k_coredump_mask == 0) + /* coredump disabled */ + return 0; + + ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data)); + if (!ar->coredump.fw_crash_data) + return -ENOMEM; + + return 0; +} + +int ath10k_coredump_register(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; + + if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) { + crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar); + + if (!crash_data->ramdump_buf_len) + return 0; + + crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len); + if (!crash_data->ramdump_buf) + return -ENOMEM; + } + + return 0; +} + +void ath10k_coredump_unregister(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; + + vfree(crash_data->ramdump_buf); +} + +void ath10k_coredump_destroy(struct ath10k *ar) +{ + if (ar->coredump.fw_crash_data->ramdump_buf) { + vfree(ar->coredump.fw_crash_data->ramdump_buf); + ar->coredump.fw_crash_data->ramdump_buf = NULL; + ar->coredump.fw_crash_data->ramdump_buf_len = 0; + } + + vfree(ar->coredump.fw_crash_data); + ar->coredump.fw_crash_data = NULL; +} diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h new file mode 100644 index 000000000000..8d274e0f374b --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/coredump.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _COREDUMP_H_ +#define _COREDUMP_H_ + +#include "core.h" + +#define ATH10K_FW_CRASH_DUMP_VERSION 1 + +/** + * enum ath10k_fw_crash_dump_type - types of data in the dump file + * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format + * @ATH10K_FW_CRASH_DUMP_CE_DATA: Copy Engine crash dump data + * @ATH10K_FW_CRASH_DUMP_RAM_DATA: RAM crash dump data, contains multiple + * struct ath10k_dump_ram_data_hdr + * @ATH10K_FW_CRASH_DUMP_MAX: Maximum enumeration + */ +enum ath10k_fw_crash_dump_type { + ATH10K_FW_CRASH_DUMP_REGISTERS = 0, + ATH10K_FW_CRASH_DUMP_CE_DATA = 1, + + /* contains multiple struct ath10k_dump_ram_data_hdr */ + ATH10K_FW_CRASH_DUMP_RAM_DATA = 2, + + ATH10K_FW_CRASH_DUMP_MAX, +}; + +struct ath10k_tlv_dump_data { + /* see ath10k_fw_crash_dump_type above */ + __le32 type; + + /* in bytes */ + __le32 tlv_len; + + /* pad to 32-bit boundaries as needed */ + u8 tlv_data[]; +} __packed; + +struct ath10k_dump_file_data { + /* dump file information */ + + /* "ATH10K-FW-DUMP" */ + char df_magic[16]; + + __le32 len; + + /* file dump version */ + __le32 version; + + /* some info we can get from ath10k struct that might help */ + + guid_t guid; + + __le32 chip_id; + + /* 0 for now, in place for later hardware */ + __le32 bus_type; + + __le32 target_version; + __le32 fw_version_major; + __le32 fw_version_minor; + __le32 fw_version_release; + __le32 fw_version_build; + __le32 phy_capability; + __le32 hw_min_tx_power; + __le32 hw_max_tx_power; + __le32 ht_cap_info; + __le32 vht_cap_info; + __le32 num_rf_chains; + + /* firmware version string */ + char fw_ver[ETHTOOL_FWVERS_LEN]; + + /* Kernel related information */ + + /* time-of-day stamp */ + __le64 tv_sec; + + /* time-of-day stamp, nano-seconds */ + __le64 tv_nsec; + + /* LINUX_VERSION_CODE */ + __le32 kernel_ver_code; + + /* VERMAGIC_STRING */ + char kernel_ver[64]; + + /* room for growth w/out changing binary format */ + u8 unused[128]; + + /* 
struct ath10k_tlv_dump_data + more */
+	u8 data[];
+} __packed;
+
+struct ath10k_dump_ram_data_hdr {
+	/* enum ath10k_mem_region_type */
+	__le32 region_type;
+
+	__le32 start;
+
+	/* length of payload data, not including this header */
+	__le32 length;
+
+	u8 data[];
+};
+
+/* magic number to fill the holes not copied due to sections in regions */
+#define ATH10K_MAGIC_NOT_COPIED		0xAA
+
+/* part of user space ABI */
+enum ath10k_mem_region_type {
+	ATH10K_MEM_REGION_TYPE_REG	= 1,
+	ATH10K_MEM_REGION_TYPE_DRAM	= 2,
+	ATH10K_MEM_REGION_TYPE_AXI	= 3,
+	ATH10K_MEM_REGION_TYPE_IRAM1	= 4,
+	ATH10K_MEM_REGION_TYPE_IRAM2	= 5,
+	ATH10K_MEM_REGION_TYPE_IOSRAM	= 6,
+	ATH10K_MEM_REGION_TYPE_IOREG	= 7,
+	ATH10K_MEM_REGION_TYPE_MSA	= 8,
+};
+
+/* Defines a section of a region which should be copied. Not all parts of
+ * the memory are safe to copy (some registers, for example), so sections
+ * are used to describe what may be copied.
+ *
+ * To minimize the size of the array, the list must obey the format:
+ * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must
+ * also satisfy 'start0 < stop0 < start1 < stop1 < start2 < ...'; otherwise
+ * the dump processing may fail.
+ */
+struct ath10k_mem_section {
+	u32 start;
+	u32 end;
+};
+
+/* One region of a memory layout. If the sections field is NULL, the entire
+ * region is copied. If sections is non-NULL, only the areas specified in
+ * sections are copied and the rest of the areas are filled with
+ * ATH10K_MAGIC_NOT_COPIED.
+ */
+struct ath10k_mem_region {
+	enum ath10k_mem_region_type type;
+	u32 start;
+	u32 len;
+
+	const char *name;
+
+	struct {
+		const struct ath10k_mem_section *sections;
+		u32 size;
+	} section_table;
+};
+
+/* Contains the memory layout of a hardware version identified by the
+ * hardware id, split into regions.
+ */
+struct ath10k_hw_mem_layout {
+	u32 hw_id;
+	u32 hw_rev;
+	enum ath10k_bus bus;
+
+	struct {
+		const struct ath10k_mem_region *regions;
+		int size;
+	} region_table;
+};
+
+/* FIXME: where to put this?
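+ *
+ * The bits of this mask select entries of enum ath10k_fw_crash_dump_type;
+ * e.g. a mask requesting register and CE dumps but no RAM dump would be
+ * (illustrative value only)
+ *
+ *	BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
+ *	BIT(ATH10K_FW_CRASH_DUMP_CE_DATA)
+ *
+ * which coredump.c checks bit by bit with test_bit().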
*/ +extern unsigned long ath10k_coredump_mask; + +#ifdef CONFIG_DEV_COREDUMP + +int ath10k_coredump_submit(struct ath10k *ar); +struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar); +int ath10k_coredump_create(struct ath10k *ar); +int ath10k_coredump_register(struct ath10k *ar); +void ath10k_coredump_unregister(struct ath10k *ar); +void ath10k_coredump_destroy(struct ath10k *ar); + +const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar); +const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar); + +#else /* CONFIG_DEV_COREDUMP */ + +static inline int ath10k_coredump_submit(struct ath10k *ar) +{ + return 0; +} + +static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar) +{ + return NULL; +} + +static inline int ath10k_coredump_create(struct ath10k *ar) +{ + return 0; +} + +static inline int ath10k_coredump_register(struct ath10k *ar) +{ + return 0; +} + +static inline void ath10k_coredump_unregister(struct ath10k *ar) +{ +} + +static inline void ath10k_coredump_destroy(struct ath10k *ar) +{ +} + +static inline const struct ath10k_hw_mem_layout * +ath10k_coredump_get_mem_layout(struct ath10k *ar) +{ + return NULL; +} + +static inline const struct ath10k_hw_mem_layout * +_ath10k_coredump_get_mem_layout(struct ath10k *ar) +{ + return NULL; +} + +#endif /* CONFIG_DEV_COREDUMP */ + +#endif /* _COREDUMP_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 499034b873d1..b7520220465a 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1,120 +1,165 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include <linux/module.h> #include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/vmalloc.h> +#include <linux/crc32.h> +#include <linux/firmware.h> +#include <linux/kstrtox.h> #include "core.h" #include "debug.h" +#include "hif.h" +#include "wmi-ops.h" -static int ath10k_printk(const char *level, const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - int rtn; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; +/* ms */ +#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 - rtn = printk("%sath10k: %pV", level, &vaf); +#define ATH10K_DEBUG_CAL_DATA_LEN 12064 - va_end(args); - - return rtn; -} - -int ath10k_info(const char *fmt, ...) 
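+/* The rewritten helpers below take the owning struct ath10k so that
+ * messages go out via the dev_printk() family and carry the device name,
+ * e.g. a call such as
+ *
+ *	ath10k_warn(ar, "failed to request fw stats: %d\n", ret);
+ *
+ * logs "ath10k_pci 0000:02:00.0: failed to request fw stats: ..." style
+ * output instead of the old bare "ath10k:" prefix (the device name shown
+ * here is a made-up example).
+ */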
+void ath10k_info(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = ath10k_printk(KERN_INFO, "%pV", &vaf); - trace_ath10k_log_info(&vaf); + dev_info(ar->dev, "%pV", &vaf); + trace_ath10k_log_info(ar, &vaf); va_end(args); - - return ret; } EXPORT_SYMBOL(ath10k_info); -int ath10k_err(const char *fmt, ...) +void ath10k_debug_print_hwfw_info(struct ath10k *ar) +{ + const struct firmware *firmware; + char fw_features[128] = {}; + u32 crc = 0; + + ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); + + ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x", + ar->hw_params.name, + ar->target_version, + ar->bus_param.chip_id, + ar->id.subsystem_vendor, ar->id.subsystem_device); + + ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n", + IS_ENABLED(CONFIG_ATH10K_DEBUG), + IS_ENABLED(CONFIG_ATH10K_DEBUGFS), + IS_ENABLED(CONFIG_ATH10K_TRACING), + IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED), + IS_ENABLED(CONFIG_NL80211_TESTMODE)); + + firmware = ar->normal_mode_fw.fw_file.firmware; + if (firmware) + crc = crc32_le(0, firmware->data, firmware->size); + + ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n", + ar->hw->wiphy->fw_version, + ar->fw_api, + fw_features, + crc); +} + +void ath10k_debug_print_board_info(struct ath10k *ar) +{ + char boardinfo[100]; + const struct firmware *board; + u32 crc; + + if (ar->id.bmi_ids_valid) + scnprintf(boardinfo, sizeof(boardinfo), "%d:%d", + ar->id.bmi_chip_id, ar->id.bmi_board_id); + else + scnprintf(boardinfo, sizeof(boardinfo), "N/A"); + + board = ar->normal_mode_fw.board; + if (!IS_ERR_OR_NULL(board)) + crc = crc32_le(0, board->data, board->size); + else + crc = 0; + + ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x", + ar->bd_api, + boardinfo, + crc); +} + +void ath10k_debug_print_boot_info(struct ath10k *ar) +{ + ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n", + ar->htt.target_version_major, + ar->htt.target_version_minor, + ar->normal_mode_fw.fw_file.wmi_op_version, + ar->normal_mode_fw.fw_file.htt_op_version, + ath10k_cal_mode_str(ar->cal_mode), + ar->max_num_stations, + test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags), + !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)); +} + +void ath10k_print_driver_info(struct ath10k *ar) +{ + ath10k_debug_print_hwfw_info(ar); + ath10k_debug_print_board_info(ar); + ath10k_debug_print_boot_info(ar); +} +EXPORT_SYMBOL(ath10k_print_driver_info); + +void ath10k_err(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = ath10k_printk(KERN_ERR, "%pV", &vaf); - trace_ath10k_log_err(&vaf); + dev_err(ar->dev, "%pV", &vaf); + trace_ath10k_log_err(ar, &vaf); va_end(args); - - return ret; } EXPORT_SYMBOL(ath10k_err); -int ath10k_warn(const char *fmt, ...) +void ath10k_warn(struct ath10k *ar, const char *fmt, ...) 
{ struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret = 0; va_start(args, fmt); vaf.va = &args; - - if (net_ratelimit()) - ret = ath10k_printk(KERN_WARNING, "%pV", &vaf); - - trace_ath10k_log_warn(&vaf); + dev_warn_ratelimited(ar->dev, "%pV", &vaf); + trace_ath10k_log_warn(ar, &vaf); va_end(args); - - return ret; } EXPORT_SYMBOL(ath10k_warn); #ifdef CONFIG_ATH10K_DEBUGFS -void ath10k_debug_read_service_map(struct ath10k *ar, - void *service_map, - size_t map_size) -{ - memcpy(ar->debug.wmi_service_bitmap, service_map, map_size); -} - static ssize_t ath10k_read_wmi_services(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char *buf; - unsigned int len = 0, buf_len = 1500; - const char *status; + size_t len = 0, buf_len = 8192; + const char *name; ssize_t ret_cnt; + bool enabled; int i; buf = kzalloc(buf_len, GFP_KERNEL); @@ -123,19 +168,25 @@ static ssize_t ath10k_read_wmi_services(struct file *file, mutex_lock(&ar->conf_mutex); - if (len > buf_len) - len = buf_len; + spin_lock_bh(&ar->data_lock); + for (i = 0; i < WMI_SERVICE_MAX; i++) { + enabled = test_bit(i, ar->wmi.svc_map); + name = wmi_service_name(i); + + if (!name) { + if (enabled) + len += scnprintf(buf + len, buf_len - len, + "%-40s %s (bit %d)\n", + "unknown", "enabled", i); - for (i = 0; i < WMI_SERVICE_LAST; i++) { - if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i)) - status = "enabled"; - else - status = "disabled"; + continue; + } len += scnprintf(buf + len, buf_len - len, - "0x%02x - %20s - %s\n", - i, wmi_service_name(i), status); + "%-40s %s\n", + name, enabled ? "enabled" : "-"); } + spin_unlock_bh(&ar->data_lock); ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); @@ -152,281 +203,1998 @@ static const struct file_operations fops_wmi_services = { .llseek = default_llseek, }; -void ath10k_debug_read_target_stats(struct ath10k *ar, - struct wmi_stats_event *ev) +static void ath10k_fw_stats_pdevs_free(struct list_head *head) { - u8 *tmp = ev->data; - struct ath10k_target_stats *stats; - int num_pdev_stats, num_vdev_stats, num_peer_stats; - struct wmi_pdev_stats *ps; - int i; + struct ath10k_fw_stats_pdev *i, *tmp; - mutex_lock(&ar->conf_mutex); + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath10k_fw_stats_vdevs_free(struct list_head *head) +{ + struct ath10k_fw_stats_vdev *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath10k_fw_stats_peers_free(struct list_head *head) +{ + struct ath10k_fw_stats_peer *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath10k_fw_extd_stats_peers_free(struct list_head *head) +{ + struct ath10k_fw_extd_stats_peer *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath10k_debug_fw_stats_reset(struct ath10k *ar) +{ + spin_lock_bh(&ar->data_lock); + ar->debug.fw_stats_done = false; + ar->debug.fw_stats.extended = false; + ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); + ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); + ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); + ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd); + spin_unlock_bh(&ar->data_lock); +} + +void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_fw_stats stats = {}; + bool is_start, is_started, is_end; 
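+	/* is_start/is_end implement the implicit start/end-of-data detection
+	 * described in the protocol comment further below.
+	 */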
+ size_t num_peers; + size_t num_vdevs; + int ret; - stats = &ar->debug.target_stats; - - num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */ - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */ - num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */ - - if (num_pdev_stats) { - ps = (struct wmi_pdev_stats *)tmp; - - stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf); - stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count); - stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count); - stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count); - stats->cycle_count = __le32_to_cpu(ps->cycle_count); - stats->phy_err_count = __le32_to_cpu(ps->phy_err_count); - stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr); - - stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued); - stats->comp_delivered = - __le32_to_cpu(ps->wal.tx.comp_delivered); - stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued); - stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued); - stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop); - stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued); - stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed); - stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued); - stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped); - stats->underrun = __le32_to_cpu(ps->wal.tx.underrun); - stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort); - stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed); - stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko); - stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc); - stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers); - stats->sw_retry_failure = - __le32_to_cpu(ps->wal.tx.sw_retry_failure); - stats->illgl_rate_phy_err = - __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err); - stats->pdev_cont_xretry = - __le32_to_cpu(ps->wal.tx.pdev_cont_xretry); - stats->pdev_tx_timeout = - __le32_to_cpu(ps->wal.tx.pdev_tx_timeout); - stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets); - stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun); - stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf); - - stats->mid_ppdu_route_change = - __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change); - stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd); - stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags); - stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags); - stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags); - stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags); - stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus); - stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus); - stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus); - stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus); - stats->oversize_amsdu = - __le32_to_cpu(ps->wal.rx.oversize_amsdu); - stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs); - stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop); - stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs); - - tmp += sizeof(struct wmi_pdev_stats); - } - - /* 0 or max vdevs */ - /* Currently firmware does not support VDEV stats */ - if (num_vdev_stats) { - struct wmi_vdev_stats *vdev_stats; - - for (i = 0; i < num_vdev_stats; i++) { - vdev_stats = (struct wmi_vdev_stats *)tmp; - tmp += sizeof(struct wmi_vdev_stats); + INIT_LIST_HEAD(&stats.pdevs); + INIT_LIST_HEAD(&stats.vdevs); + INIT_LIST_HEAD(&stats.peers); + INIT_LIST_HEAD(&stats.peers_extd); + + spin_lock_bh(&ar->data_lock); + ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats); + if 
(ret) {
+		ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
+		goto free;
+	}
+
+	/* Stat data may exceed htc-wmi buffer limit. In such a case the
+	 * firmware splits the stats data and delivers it in a ping-pong
+	 * fashion of request cmd and update event.
+	 *
+	 * However there is no explicit end-of-data. Instead start-of-data is
+	 * used as an implicit one. This works as follows:
+	 *  a) discard stat update events until one with pdev stats is
+	 *     delivered - this skips the session started at the end of (b)
+	 *  b) consume stat update events until another one with pdev stats is
+	 *     delivered, which is treated as end-of-data and is itself
+	 *     discarded
+	 */
+	if (ath10k_peer_stats_enabled(ar))
+		ath10k_sta_update_rx_duration(ar, &stats);
+
+	if (ar->debug.fw_stats_done) {
+		if (!ath10k_peer_stats_enabled(ar))
+			ath10k_warn(ar, "received unsolicited stats update event\n");
+
+		goto free;
+	}
+
+	num_peers = list_count_nodes(&ar->debug.fw_stats.peers);
+	num_vdevs = list_count_nodes(&ar->debug.fw_stats.vdevs);
+	is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
+		    !list_empty(&stats.pdevs));
+	is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
+		  !list_empty(&stats.pdevs));
+
+	if (is_start)
+		list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+
+	if (is_end)
+		ar->debug.fw_stats_done = true;
+
+	if (stats.extended)
+		ar->debug.fw_stats.extended = true;
+
+	is_started = !list_empty(&ar->debug.fw_stats.pdevs);
+
+	if (is_started && !is_end) {
+		if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
+			/* Although this is unlikely, impose a sane limit to
+			 * prevent firmware from DoS-ing the host.
+			 */
+			ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+			ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd);
+			ath10k_warn(ar, "dropping fw peer stats\n");
+			goto free;
+		}
+
+		if (num_vdevs >= BITS_PER_LONG) {
+			ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+			ath10k_warn(ar, "dropping fw vdev stats\n");
+			goto free;
+		}
+
+		if (!list_empty(&stats.peers))
+			list_splice_tail_init(&stats.peers_extd,
+					      &ar->debug.fw_stats.peers_extd);
+
+		list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
+		list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
+	}
+
+	complete(&ar->debug.fw_stats_complete);
+
+free:
+	/* In some cases lists have been spliced and cleared. Free up
+	 * resources if that is not the case.
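+	 * (list_splice_tail_init() re-initialises the source list head, so
+	 * any list spliced above is already empty here and its free below
+	 * is simply a no-op.)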
+ */ + ath10k_fw_stats_pdevs_free(&stats.pdevs); + ath10k_fw_stats_vdevs_free(&stats.vdevs); + ath10k_fw_stats_peers_free(&stats.peers); + ath10k_fw_extd_stats_peers_free(&stats.peers_extd); + + spin_unlock_bh(&ar->data_lock); +} + +int ath10k_debug_fw_stats_request(struct ath10k *ar) +{ + unsigned long timeout, time_left; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + timeout = jiffies + msecs_to_jiffies(1 * HZ); + + ath10k_debug_fw_stats_reset(ar); + + for (;;) { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; - stats->peers = num_peer_stats; + reinit_completion(&ar->debug.fw_stats_complete); - for (i = 0; i < num_peer_stats; i++) { - peer_stats = (struct wmi_peer_stats *)tmp; - s = &stats->peer_stat[i]; + ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask); + if (ret) { + ath10k_warn(ar, "could not request stats (%d)\n", ret); + return ret; + } - WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr, - s->peer_macaddr); - s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi); - s->peer_tx_rate = - __le32_to_cpu(peer_stats->peer_tx_rate); + time_left = + wait_for_completion_timeout(&ar->debug.fw_stats_complete, + 1 * HZ); + if (!time_left) + return -ETIMEDOUT; - tmp += sizeof(struct wmi_peer_stats); + spin_lock_bh(&ar->data_lock); + if (ar->debug.fw_stats_done) { + spin_unlock_bh(&ar->data_lock); + break; } + spin_unlock_bh(&ar->data_lock); + } + + return 0; +} + +static int ath10k_fw_stats_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + void *buf = NULL; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; } + buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE); + if (!buf) { + ret = -ENOMEM; + goto err_unlock; + } + + ret = ath10k_debug_fw_stats_request(ar); + if (ret) { + ath10k_warn(ar, "failed to request fw stats: %d\n", ret); + goto err_free; + } + + ret = ath10k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, buf); + if (ret) { + ath10k_warn(ar, "failed to fill fw stats: %d\n", ret); + goto err_free; + } + + file->private_data = buf; + mutex_unlock(&ar->conf_mutex); - complete(&ar->debug.event_stats_compl); + return 0; + +err_free: + vfree(buf); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; } -static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, +static int ath10k_fw_stats_release(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { + const char *buf = file->private_data; + size_t len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_fw_stats = { + .open = ath10k_fw_stats_open, + .release = ath10k_fw_stats_release, + .read = ath10k_fw_stats_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ struct ath10k *ar = file->private_data; - struct ath10k_target_stats *fw_stats; - char *buf; - unsigned int len = 0, buf_len = 2500; - ssize_t ret_cnt; - long left; - int i; int ret; + size_t len = 0, buf_len = 500; + char *buf; - fw_stats = &ar->debug.target_stats; - - buf = kzalloc(buf_len, GFP_KERNEL); + buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; - ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); + spin_lock_bh(&ar->data_lock); + + len += 
scnprintf(buf + len, buf_len - len, + "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter); + len += scnprintf(buf + len, buf_len - len, + "fw_warm_reset_counter\t\t%d\n", + ar->stats.fw_warm_reset_counter); + len += scnprintf(buf + len, buf_len - len, + "fw_cold_reset_counter\t\t%d\n", + ar->stats.fw_cold_reset_counter); + + spin_unlock_bh(&ar->data_lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + + return ret; +} + +static const struct file_operations fops_fw_reset_stats = { + .open = simple_open, + .read = ath10k_debug_fw_reset_stats_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* This is a clean assert crash in firmware. */ +static int ath10k_debug_fw_assert(struct ath10k *ar) +{ + struct wmi_vdev_install_key_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_install_key_cmd *)skb->data; + memset(cmd, 0, sizeof(*cmd)); + + /* big enough number so that firmware asserts */ + cmd->vdev_id = __cpu_to_le32(0x7ffe); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->vdev_install_key_cmdid); +} + +static ssize_t ath10k_read_simulate_fw_crash(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char buf[] = + "To simulate firmware crash write one of the keywords to this file:\n" + "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n" + "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n" + "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n" + "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n"; + + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); +} + +/* Simulate firmware crash: + * 'soft': Call wmi command causing firmware hang. This firmware hang is + * recoverable by warm firmware reset. + * 'hard': Force firmware crash by setting any vdev parameter for not allowed + * vdev id. This is hard firmware crash because it is recoverable only by cold + * firmware reset. + */ +static ssize_t ath10k_write_simulate_fw_crash(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32] = {}; + ssize_t rc; + int ret; + + /* filter partial writes and invalid commands */ + if (*ppos != 0 || count >= sizeof(buf) || count == 0) + return -EINVAL; + + rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); + if (rc < 0) + return rc; + + /* drop the possible '\n' from the end */ + if (buf[*ppos - 1] == '\n') + buf[*ppos - 1] = '\0'; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_RESTARTED) { + ret = -ENETDOWN; + goto exit; + } + + if (!strcmp(buf, "soft")) { + ath10k_info(ar, "simulating soft firmware crash\n"); + ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); + } else if (!strcmp(buf, "hard")) { + ath10k_info(ar, "simulating hard firmware crash\n"); + /* 0x7fff is vdev id, and it is always out of range for all + * firmware variants in order to force a firmware crash. 
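In practice simulate_fw_crash is driven with a single keyword write from userspace while the device is up. A minimal sketch (the debugfs path is hypothetical and depends on the wiphy index):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* "soft" asks firmware for a warm-reset-recoverable hang */
		const char *kw = "soft";
		int fd = open("/sys/kernel/debug/ieee80211/phy0/ath10k/"
			      "simulate_fw_crash", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, kw, strlen(kw)) < 0)
			return 1;
		return close(fd);
	}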
+ */ + ret = ath10k_wmi_vdev_set_param(ar, 0x7fff, + ar->wmi.vdev_param->rts_threshold, + 0); + } else if (!strcmp(buf, "assert")) { + ath10k_info(ar, "simulating firmware assert crash\n"); + ret = ath10k_debug_fw_assert(ar); + } else if (!strcmp(buf, "hw-restart")) { + ath10k_info(ar, "user requested hw restart\n"); + ath10k_core_start_recovery(ar); + ret = 0; + } else { + ret = -EINVAL; + goto exit; + } + if (ret) { - ath10k_warn("could not request stats (%d)\n", ret); - kfree(buf); - return -EIO; + ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret); + goto exit; } - left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ); + ret = count; - if (left <= 0) { - kfree(buf); - return -ETIMEDOUT; +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static const struct file_operations fops_simulate_fw_crash = { + .read = ath10k_read_simulate_fw_crash, + .write = ath10k_write_simulate_fw_crash, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + size_t len; + char buf[50]; + + len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->bus_param.chip_id); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_chip_id = { + .read = ath10k_read_chip_id, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_reg_addr_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u8 buf[32]; + size_t len = 0; + u32 reg_addr; + + mutex_lock(&ar->conf_mutex); + reg_addr = ar->debug.reg_addr; + mutex_unlock(&ar->conf_mutex); + + len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_reg_addr_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u32 reg_addr; + int ret; + + ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr); + if (ret) + return ret; + + if (!IS_ALIGNED(reg_addr, 4)) + return -EFAULT; + + mutex_lock(&ar->conf_mutex); + ar->debug.reg_addr = reg_addr; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_reg_addr = { + .read = ath10k_reg_addr_read, + .write = ath10k_reg_addr_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_reg_value_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u8 buf[48]; + size_t len; + u32 reg_addr, reg_val; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto exit; } + reg_addr = ar->debug.reg_addr; + + reg_val = ath10k_hif_read32(ar, reg_addr); + len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_reg_value_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u32 reg_addr, reg_val; + int ret; + mutex_lock(&ar->conf_mutex); - len += scnprintf(buf + len, buf_len - len, "\n"); - len += 
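reg_addr and reg_value above operate as a pair: write a 4-byte-aligned register offset to reg_addr, then read reg_value to perform the MMIO read (the driver answers "0xADDR:0xVALUE"); writing reg_value instead issues ath10k_hif_write32() at the latched offset. A hedged userspace sketch, with the device running (path and offset are hypothetical):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#define DBG "/sys/kernel/debug/ieee80211/phy0/ath10k/"

	int main(void)
	{
		char val[48] = {0};
		int fd;

		fd = open(DBG "reg_addr", O_WRONLY);
		if (fd < 0)
			return 1;
		/* any 4-byte-aligned offset passes the IS_ALIGNED() check */
		if (write(fd, "0x0003a028", 10) < 0)
			return 1;
		close(fd);

		fd = open(DBG "reg_value", O_RDONLY);
		if (fd < 0)
			return 1;
		if (read(fd, val, sizeof(val) - 1) < 0)
			return 1;
		close(fd);

		printf("%s", val);	/* "0x0003a028:0x........" */
		return 0;
	}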
scnprintf(buf + len, buf_len - len, "%30s\n", - "ath10k PDEV stats"); - len += scnprintf(buf + len, buf_len - len, "%30s\n\n", - "================="); - - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Channel noise floor", fw_stats->ch_noise_floor); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "Channel TX power", fw_stats->chan_tx_power); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "TX frame count", fw_stats->tx_frame_count); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RX frame count", fw_stats->rx_frame_count); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RX clear count", fw_stats->rx_clear_count); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "Cycle count", fw_stats->cycle_count); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "PHY error count", fw_stats->phy_err_count); + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto exit; + } - len += scnprintf(buf + len, buf_len - len, "\n"); - len += scnprintf(buf + len, buf_len - len, "%30s\n", - "ath10k PDEV TX stats"); - len += scnprintf(buf + len, buf_len - len, "%30s\n\n", - "================="); - - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HTT cookies queued", fw_stats->comp_queued); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HTT cookies disp.", fw_stats->comp_delivered); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDU queued", fw_stats->msdu_enqued); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU queued", fw_stats->mpdu_enqued); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs dropped", fw_stats->wmm_drop); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Local enqued", fw_stats->local_enqued); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Local freed", fw_stats->local_freed); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HW queued", fw_stats->hw_queued); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PPDUs reaped", fw_stats->hw_reaped); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Num underruns", fw_stats->underrun); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PPDUs cleaned", fw_stats->tx_abort); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs requed", fw_stats->mpdus_requed); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Excessive retries", fw_stats->tx_ko); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HW rate", fw_stats->data_rc); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Sched self tiggers", fw_stats->self_triggers); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Dropped due to SW retries", - fw_stats->sw_retry_failure); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Illegal rate phy errors", - fw_stats->illgl_rate_phy_err); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Pdev continous xretry", fw_stats->pdev_cont_xretry); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "TX timeout", fw_stats->pdev_tx_timeout); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PDEV resets", fw_stats->pdev_resets); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY underrun", fw_stats->phy_underrun); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU is more than txop limit", fw_stats->txop_ovf); + reg_addr = 
ar->debug.reg_addr; - len += scnprintf(buf + len, buf_len - len, "\n"); - len += scnprintf(buf + len, buf_len - len, "%30s\n", - "ath10k PDEV RX stats"); - len += scnprintf(buf + len, buf_len - len, "%30s\n\n", - "================="); - - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Mid PPDU route change", - fw_stats->mid_ppdu_route_change); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Tot. number of statuses", fw_stats->status_rcvd); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 0", fw_stats->r0_frags); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 1", fw_stats->r1_frags); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 2", fw_stats->r2_frags); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 3", fw_stats->r3_frags); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs delivered to HTT", fw_stats->htt_msdus); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs delivered to HTT", fw_stats->htt_mpdus); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs delivered to stack", fw_stats->loc_msdus); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs delivered to stack", fw_stats->loc_mpdus); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Oversized AMSUs", fw_stats->oversize_amsdu); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY errors", fw_stats->phy_errs); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY errors drops", fw_stats->phy_err_drop); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs); + ret = kstrtou32_from_user(user_buf, count, 0, &reg_val); + if (ret) + goto exit; + + ath10k_hif_write32(ar, reg_addr, reg_val); + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static const struct file_operations fops_reg_value = { + .read = ath10k_reg_value_read, + .write = ath10k_reg_value_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_mem_value_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u8 *buf; + int ret; + + if (*ppos < 0) + return -EINVAL; + + if (!count) + return 0; + + mutex_lock(&ar->conf_mutex); + + buf = vmalloc(count); + if (!buf) { + ret = -ENOMEM; + goto exit; + } + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto exit; + } + + ret = ath10k_hif_diag_read(ar, *ppos, buf, count); + if (ret) { + ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n", + (u32)(*ppos), ret); + goto exit; + } + + ret = copy_to_user(user_buf, buf, count); + if (ret) { + ret = -EFAULT; + goto exit; + } + + count -= ret; + *ppos += count; + ret = count; + +exit: + vfree(buf); + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_mem_value_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u8 *buf; + int ret; + + if (*ppos < 0) + return -EINVAL; + + if (!count) + return 0; + + mutex_lock(&ar->conf_mutex); + + buf = vmalloc(count); + if (!buf) { + ret = -ENOMEM; + goto exit; + } + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto exit; + } + + ret = 
copy_from_user(buf, user_buf, count); + if (ret) { + ret = -EFAULT; + goto exit; + } + + ret = ath10k_hif_diag_write(ar, *ppos, buf, count); + if (ret) { + ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n", + (u32)(*ppos), ret); + goto exit; + } + + *ppos += count; + ret = count; + +exit: + vfree(buf); + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static const struct file_operations fops_mem_value = { + .read = ath10k_mem_value_read, + .write = ath10k_mem_value_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int ath10k_debug_htt_stats_req(struct ath10k *ar) +{ + u64 cookie; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (ar->debug.htt_stats_mask == 0) + /* htt stats are disabled */ + return 0; + + if (ar->state != ATH10K_STATE_ON) + return 0; + + cookie = get_jiffies_64(); + + ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask, + ar->debug.reset_htt_stats, cookie); + if (ret) { + ath10k_warn(ar, "failed to send htt stats request: %d\n", ret); + return ret; + } + + queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork, + msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL)); + + return 0; +} + +static void ath10k_debug_htt_stats_dwork(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, + debug.htt_stats_dwork.work); + + mutex_lock(&ar->conf_mutex); + + ath10k_debug_htt_stats_req(ar); + + mutex_unlock(&ar->conf_mutex); +} + +static ssize_t ath10k_read_htt_stats_mask(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + size_t len; + + len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_write_htt_stats_mask(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned long mask; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 0, &mask); + if (ret) + return ret; + + /* max 17 bit masks (for now) */ + if (mask > HTT_STATS_BIT_MASK) + return -E2BIG; + + mutex_lock(&ar->conf_mutex); + + ar->debug.htt_stats_mask = mask; + + ret = ath10k_debug_htt_stats_req(ar); + if (ret) + goto out; + + ret = count; + +out: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static const struct file_operations fops_htt_stats_mask = { + .read = ath10k_read_htt_stats_mask, + .write = ath10k_write_htt_stats_mask, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[64]; + u8 amsdu, ampdu; + size_t len; + + mutex_lock(&ar->conf_mutex); + + amsdu = ar->htt.max_num_amsdu; + ampdu = ar->htt.max_num_ampdu; + mutex_unlock(&ar->conf_mutex); + + len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int res; + char buf[64] = {}; + unsigned int amsdu, ampdu; + + res = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (res <= 0) + return res; + + res = sscanf(buf, "%u %u", &amsdu, &ampdu); + + if (res != 2) + return 
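htt_max_amsdu_ampdu parses two unsigned values from one write ("%u %u": the A-MSDU limit first, the A-MPDU limit second), so both must be supplied together. Userspace sketch (hypothetical path and limits):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* "3 64": at most 3 MSDUs per A-MSDU, 64 MPDUs per A-MPDU */
		const char *cfg = "3 64";
		int fd = open("/sys/kernel/debug/ieee80211/phy0/ath10k/"
			      "htt_max_amsdu_ampdu", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, cfg, strlen(cfg)) < 0)
			return 1;
		return close(fd);
	}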
-EINVAL; + + mutex_lock(&ar->conf_mutex); + + res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu); + if (res) + goto out; + + res = count; + ar->htt.max_num_amsdu = amsdu; + ar->htt.max_num_ampdu = ampdu; + +out: + mutex_unlock(&ar->conf_mutex); + return res; +} + +static const struct file_operations fops_htt_max_amsdu_ampdu = { + .read = ath10k_read_htt_max_amsdu_ampdu, + .write = ath10k_write_htt_max_amsdu_ampdu, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_read_fw_dbglog(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + size_t len; + char buf[96]; + + len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n", + ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_write_fw_dbglog(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int ret; + char buf[96] = {}; + unsigned int log_level; + u64 mask; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; + + ret = sscanf(buf, "%llx %u", &mask, &log_level); + + if (!ret) + return -EINVAL; + + if (ret == 1) + /* default if user did not specify */ + log_level = ATH10K_DBGLOG_LEVEL_WARN; + + mutex_lock(&ar->conf_mutex); + + ar->debug.fw_dbglog_mask = mask; + ar->debug.fw_dbglog_level = log_level; + + if (ar->state == ATH10K_STATE_ON) { + ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask, + ar->debug.fw_dbglog_level); + if (ret) { + ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n", + ret); + goto exit; + } + } + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +/* TODO: Would be nice to always support ethtool stats, would need to + * move the stats storage out of ath10k_debug, or always have ath10k_debug + * struct available.. 
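fw_dbglog above takes a hex module mask optionally followed by a decimal log level; when only the mask parses, the driver substitutes ATH10K_DBGLOG_LEVEL_WARN. A standalone condensation of that parse rule (the constant's value here is a placeholder):

	#include <stdio.h>

	#define DBGLOG_LEVEL_WARN 1	/* stands in for ATH10K_DBGLOG_LEVEL_WARN */

	static int parse_fw_dbglog(const char *buf,
				   unsigned long long *mask, unsigned int *level)
	{
		int n = sscanf(buf, "%llx %u", mask, level);

		if (n < 1)
			return -1;	/* no mask parsed: -EINVAL in the driver */
		if (n == 1)
			*level = DBGLOG_LEVEL_WARN;	/* level omitted */
		return 0;
	}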
+ */ + +/* This generally corresponds to the debugfs fw_stats file */ +static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = { + "tx_pkts_nic", + "tx_bytes_nic", + "rx_pkts_nic", + "rx_bytes_nic", + "d_noise_floor", + "d_cycle_count", + "d_phy_error", + "d_rts_bad", + "d_rts_good", + "d_tx_power", /* in .5 dbM I think */ + "d_rx_crc_err", /* fcs_bad */ + "d_rx_crc_err_drop", /* frame with FCS error, dropped late in kernel */ + "d_no_beacon", + "d_tx_mpdus_queued", + "d_tx_msdu_queued", + "d_tx_msdu_dropped", + "d_local_enqued", + "d_local_freed", + "d_tx_ppdu_hw_queued", + "d_tx_ppdu_reaped", + "d_tx_fifo_underrun", + "d_tx_ppdu_abort", + "d_tx_mpdu_requeued", + "d_tx_excessive_retries", + "d_tx_hw_rate", + "d_tx_dropped_sw_retries", + "d_tx_illegal_rate", + "d_tx_continuous_xretries", + "d_tx_timeout", + "d_tx_mpdu_txop_limit", + "d_pdev_resets", + "d_rx_mid_ppdu_route_change", + "d_rx_status", + "d_rx_extra_frags_ring0", + "d_rx_extra_frags_ring1", + "d_rx_extra_frags_ring2", + "d_rx_extra_frags_ring3", + "d_rx_msdu_htt", + "d_rx_mpdu_htt", + "d_rx_msdu_stack", + "d_rx_mpdu_stack", + "d_rx_phy_err", + "d_rx_phy_err_drops", + "d_rx_mpdu_errors", /* FCS, MIC, ENC */ + "d_fw_crash_count", + "d_fw_warm_reset_count", + "d_fw_cold_reset_count", +}; + +#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats) + +void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data) +{ + if (sset == ETH_SS_STATS) + memcpy(data, ath10k_gstrings_stats, + sizeof(ath10k_gstrings_stats)); +} + +int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset) +{ + if (sset == ETH_SS_STATS) + return ATH10K_SSTATS_LEN; + + return 0; +} + +void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data) +{ + struct ath10k *ar = hw->priv; + static const struct ath10k_fw_stats_pdev zero_stats = {}; + const struct ath10k_fw_stats_pdev *pdev_stats; + int i = 0, ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state == ATH10K_STATE_ON) { + ret = ath10k_debug_fw_stats_request(ar); + if (ret) { + /* just print a warning and try to use older results */ + ath10k_warn(ar, + "failed to get fw stats for ethtool: %d\n", + ret); + } + } + + pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs, + struct ath10k_fw_stats_pdev, + list); + if (!pdev_stats) { + /* no results available so just return zeroes */ + pdev_stats = &zero_stats; + } + + spin_lock_bh(&ar->data_lock); + + data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */ + data[i++] = 0; /* tx bytes */ + data[i++] = pdev_stats->htt_mpdus; + data[i++] = 0; /* rx bytes */ + data[i++] = pdev_stats->ch_noise_floor; + data[i++] = pdev_stats->cycle_count; + data[i++] = pdev_stats->phy_err_count; + data[i++] = pdev_stats->rts_bad; + data[i++] = pdev_stats->rts_good; + data[i++] = pdev_stats->chan_tx_power; + data[i++] = pdev_stats->fcs_bad; + data[i++] = ar->stats.rx_crc_err_drop; + data[i++] = pdev_stats->no_beacons; + data[i++] = pdev_stats->mpdu_enqued; + data[i++] = pdev_stats->msdu_enqued; + data[i++] = pdev_stats->wmm_drop; + data[i++] = pdev_stats->local_enqued; + data[i++] = pdev_stats->local_freed; + data[i++] = pdev_stats->hw_queued; + data[i++] = pdev_stats->hw_reaped; + data[i++] = pdev_stats->underrun; + data[i++] = pdev_stats->tx_abort; + data[i++] = pdev_stats->mpdus_requeued; + data[i++] = pdev_stats->tx_ko; + data[i++] = pdev_stats->data_rc; + data[i++] = pdev_stats->sw_retry_failure; + data[i++] = 
pdev_stats->illgl_rate_phy_err; + data[i++] = pdev_stats->pdev_cont_xretry; + data[i++] = pdev_stats->pdev_tx_timeout; + data[i++] = pdev_stats->txop_ovf; + data[i++] = pdev_stats->pdev_resets; + data[i++] = pdev_stats->mid_ppdu_route_change; + data[i++] = pdev_stats->status_rcvd; + data[i++] = pdev_stats->r0_frags; + data[i++] = pdev_stats->r1_frags; + data[i++] = pdev_stats->r2_frags; + data[i++] = pdev_stats->r3_frags; + data[i++] = pdev_stats->htt_msdus; + data[i++] = pdev_stats->htt_mpdus; + data[i++] = pdev_stats->loc_msdus; + data[i++] = pdev_stats->loc_mpdus; + data[i++] = pdev_stats->phy_errs; + data[i++] = pdev_stats->phy_err_drop; + data[i++] = pdev_stats->mpdu_errs; + data[i++] = ar->stats.fw_crash_counter; + data[i++] = ar->stats.fw_warm_reset_counter; + data[i++] = ar->stats.fw_cold_reset_counter; + + spin_unlock_bh(&ar->data_lock); + + mutex_unlock(&ar->conf_mutex); + + WARN_ON(i != ATH10K_SSTATS_LEN); +} + +static const struct file_operations fops_fw_dbglog = { + .read = ath10k_read_fw_dbglog, + .write = ath10k_write_fw_dbglog, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int ath10k_debug_cal_data_fetch(struct ath10k *ar) +{ + u32 hi_addr; + __le32 addr; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN)) + return -EINVAL; + + if (ar->hw_params.cal_data_len == 0) + return -EOPNOTSUPP; + + hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); + + ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); + if (ret) { + ath10k_warn(ar, "failed to read hi_board_data address: %d\n", + ret); + return ret; + } + + ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data, + ar->hw_params.cal_data_len); + if (ret) { + ath10k_warn(ar, "failed to read calibration data: %d\n", ret); + return ret; + } + + return 0; +} + +static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + + mutex_lock(&ar->conf_mutex); + + if (ar->state == ATH10K_STATE_ON || + ar->state == ATH10K_STATE_UTF) { + ath10k_debug_cal_data_fetch(ar); + } + + file->private_data = ar; + mutex_unlock(&ar->conf_mutex); + + return 0; +} + +static ssize_t ath10k_debug_cal_data_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + + mutex_lock(&ar->conf_mutex); + + count = simple_read_from_buffer(user_buf, count, ppos, + ar->debug.cal_data, + ar->hw_params.cal_data_len); + + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static ssize_t ath10k_write_ani_enable(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int ret; + u8 enable; + + if (kstrtou8_from_user(user_buf, count, 0, &enable)) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->ani_enabled == enable) { + ret = count; + goto exit; + } + + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable, + enable); + if (ret) { + ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret); + goto exit; + } + ar->ani_enabled = enable; + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + size_t len; + char buf[32]; + + len = scnprintf(buf, sizeof(buf), "%d\n", ar->ani_enabled); + + return 
simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ani_enable = { + .read = ath10k_read_ani_enable, + .write = ath10k_write_ani_enable, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static const struct file_operations fops_cal_data = { + .open = ath10k_debug_cal_data_open, + .read = ath10k_debug_cal_data_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_read_nf_cal_period(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + size_t len; + char buf[32]; + + len = scnprintf(buf, sizeof(buf), "%d\n", ar->debug.nf_cal_period); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_write_nf_cal_period(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned long period; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 0, &period); + if (ret) + return ret; + + if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX) + return -EINVAL; + + /* there's no way to switch back to the firmware default */ + if (period == 0) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + ar->debug.nf_cal_period = period; + + if (ar->state != ATH10K_STATE_ON) { + /* firmware is not running, nothing else to do */ + ret = count; + goto exit; + } + + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period, + ar->debug.nf_cal_period); + if (ret) { + ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n", + ret); + goto exit; + } + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static const struct file_operations fops_nf_cal_period = { + .read = ath10k_read_nf_cal_period, + .write = ath10k_write_nf_cal_period, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +#define ATH10K_TPC_CONFIG_BUF_SIZE (1024 * 1024) + +static int ath10k_debug_tpc_stats_request(struct ath10k *ar) +{ + int ret; + unsigned long time_left; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->debug.tpc_complete); + + ret = ath10k_wmi_pdev_get_tpc_config(ar, WMI_TPC_CONFIG_PARAM); + if (ret) { + ath10k_warn(ar, "failed to request tpc config: %d\n", ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->debug.tpc_complete, + 1 * HZ); + if (time_left == 0) + return -ETIMEDOUT; + + return 0; +} + +void ath10k_debug_tpc_stats_process(struct ath10k *ar, + struct ath10k_tpc_stats *tpc_stats) +{ + spin_lock_bh(&ar->data_lock); + + kfree(ar->debug.tpc_stats); + ar->debug.tpc_stats = tpc_stats; + complete(&ar->debug.tpc_complete); + + spin_unlock_bh(&ar->data_lock); +} + +void +ath10k_debug_tpc_stats_final_process(struct ath10k *ar, + struct ath10k_tpc_stats_final *tpc_stats) +{ + spin_lock_bh(&ar->data_lock); + + kfree(ar->debug.tpc_stats_final); + ar->debug.tpc_stats_final = tpc_stats; + complete(&ar->debug.tpc_complete); + + spin_unlock_bh(&ar->data_lock); +} + +static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, + unsigned int j, char *buf, size_t *len) +{ + int i; + size_t buf_len; + static const char table_str[][5] = { "CDD", + "STBC", + "TXBF" }; + static const char pream_str[][6] = { "CCK", + "OFDM", + "HT20", + "HT40", + "VHT20", + "VHT40", + "VHT80", + "HTCUP" }; + + buf_len = ATH10K_TPC_CONFIG_BUF_SIZE; + *len += scnprintf(buf + *len, buf_len - *len, + "********************************\n"); + *len += scnprintf(buf + 
*len, buf_len - *len, + "******************* %s POWER TABLE ****************\n", + table_str[j]); + *len += scnprintf(buf + *len, buf_len - *len, + "********************************\n"); + *len += scnprintf(buf + *len, buf_len - *len, + "No. Preamble Rate_code "); + + for (i = 0; i < tpc_stats->num_tx_chain; i++) + *len += scnprintf(buf + *len, buf_len - *len, + "tpc_value%d ", i); + + *len += scnprintf(buf + *len, buf_len - *len, "\n"); + + for (i = 0; i < tpc_stats->rate_max; i++) { + *len += scnprintf(buf + *len, buf_len - *len, + "%8d %s 0x%2x %s\n", i, + pream_str[tpc_stats->tpc_table[j].pream_idx[i]], + tpc_stats->tpc_table[j].rate_code[i], + tpc_stats->tpc_table[j].tpc_value[i]); + } + + *len += scnprintf(buf + *len, buf_len - *len, + "***********************************\n"); +} + +static void ath10k_tpc_stats_fill(struct ath10k *ar, + struct ath10k_tpc_stats *tpc_stats, + char *buf) +{ + int j; + size_t len, buf_len; + + len = 0; + buf_len = ATH10K_TPC_CONFIG_BUF_SIZE; + + spin_lock_bh(&ar->data_lock); + + if (!tpc_stats) { + ath10k_warn(ar, "failed to get tpc stats\n"); + goto unlock; + } len += scnprintf(buf + len, buf_len - len, "\n"); - len += scnprintf(buf + len, buf_len - len, "%30s\n", - "ath10k PEER stats"); - len += scnprintf(buf + len, buf_len - len, "%30s\n\n", - "================="); - - for (i = 0; i < fw_stats->peers; i++) { - len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", - "Peer MAC address", - fw_stats->peer_stat[i].peer_macaddr); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer RSSI", fw_stats->peer_stat[i].peer_rssi); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer TX rate", - fw_stats->peer_stat[i].peer_tx_rate); - len += scnprintf(buf + len, buf_len - len, "\n"); - } - - if (len > buf_len) - len = buf_len; + len += scnprintf(buf + len, buf_len - len, + "*************************************\n"); + len += scnprintf(buf + len, buf_len - len, + "TPC config for channel %4d mode %d\n", + tpc_stats->chan_freq, + tpc_stats->phy_mode); + len += scnprintf(buf + len, buf_len - len, + "*************************************\n"); + len += scnprintf(buf + len, buf_len - len, + "CTL = 0x%2x Reg. Domain = %2d\n", + tpc_stats->ctl, + tpc_stats->reg_domain); + len += scnprintf(buf + len, buf_len - len, + "Antenna Gain = %2d Reg. Max Antenna Gain = %2d\n", + tpc_stats->twice_antenna_gain, + tpc_stats->twice_antenna_reduction); + len += scnprintf(buf + len, buf_len - len, + "Power Limit = %2d Reg. 
Max Power = %2d\n", + tpc_stats->power_limit, + tpc_stats->twice_max_rd_power / 2); + len += scnprintf(buf + len, buf_len - len, + "Num tx chains = %2d Num supported rates = %2d\n", + tpc_stats->num_tx_chain, + tpc_stats->rate_max); + + for (j = 0; j < WMI_TPC_FLAG; j++) { + switch (j) { + case WMI_TPC_TABLE_TYPE_CDD: + if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { + len += scnprintf(buf + len, buf_len - len, + "CDD not supported\n"); + break; + } + + ath10k_tpc_stats_print(tpc_stats, j, buf, &len); + break; + case WMI_TPC_TABLE_TYPE_STBC: + if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { + len += scnprintf(buf + len, buf_len - len, + "STBC not supported\n"); + break; + } + + ath10k_tpc_stats_print(tpc_stats, j, buf, &len); + break; + case WMI_TPC_TABLE_TYPE_TXBF: + if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { + len += scnprintf(buf + len, buf_len - len, + "TXBF not supported\n***************************\n"); + break; + } + + ath10k_tpc_stats_print(tpc_stats, j, buf, &len); + break; + default: + len += scnprintf(buf + len, buf_len - len, + "Invalid Type\n"); + break; + } + } + +unlock: + spin_unlock_bh(&ar->data_lock); + + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; +} + +static int ath10k_tpc_stats_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + void *buf = NULL; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE); + if (!buf) { + ret = -ENOMEM; + goto err_unlock; + } + + ret = ath10k_debug_tpc_stats_request(ar); + if (ret) { + ath10k_warn(ar, "failed to request tpc config stats: %d\n", + ret); + goto err_free; + } + + ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf); + file->private_data = buf; + + mutex_unlock(&ar->conf_mutex); + return 0; + +err_free: + vfree(buf); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath10k_tpc_stats_release(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + size_t len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_tpc_stats = { + .open = ath10k_tpc_stats_open, + .release = ath10k_tpc_stats_release, + .read = ath10k_tpc_stats_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +int ath10k_debug_start(struct ath10k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ret = ath10k_debug_htt_stats_req(ar); + if (ret) + /* continue normally anyway, this isn't serious */ + ath10k_warn(ar, "failed to start htt stats workqueue: %d\n", + ret); + + if (ar->debug.fw_dbglog_mask) { + ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask, + ATH10K_DBGLOG_LEVEL_WARN); + if (ret) + /* not serious */ + ath10k_warn(ar, "failed to enable dbglog during start: %d", + ret); + } + + if (ar->pktlog_filter) { + ret = ath10k_wmi_pdev_pktlog_enable(ar, + ar->pktlog_filter); + if (ret) + /* not serious */ + ath10k_warn(ar, + "failed to enable pktlog filter %x: %d\n", + ar->pktlog_filter, ret); + } else { + ret = ath10k_wmi_pdev_pktlog_disable(ar); + if (ret) + /* not serious */ + ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); + } + + if (ar->debug.nf_cal_period && + !test_bit(ATH10K_FW_FEATURE_NON_BMI, + 
ar->normal_mode_fw.fw_file.fw_features)) { + ret = ath10k_wmi_pdev_set_param(ar, + ar->wmi.pdev_param->cal_period, + ar->debug.nf_cal_period); + if (ret) + /* not serious */ + ath10k_warn(ar, "cal period cfg failed from debug start: %d\n", + ret); + } + + return ret; +} + +void ath10k_debug_stop(struct ath10k *ar) +{ + lockdep_assert_held(&ar->conf_mutex); + + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) + ath10k_debug_cal_data_fetch(ar); + + /* Must not use _sync to avoid deadlock, we do that in + * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid + * warning from timer_delete(). + */ + if (ar->debug.htt_stats_mask != 0) + cancel_delayed_work(&ar->debug.htt_stats_dwork); + + ath10k_wmi_pdev_pktlog_disable(ar); +} + +static ssize_t ath10k_write_simulate_radar(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + struct ath10k_vif *arvif; + + /* Just check for the first vif alone, as all the vifs will be + * sharing the same channel and if the channel is disabled, all the + * vifs will share the same 'is_started' state. + */ + arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list); + if (!arvif->is_started) + return -EINVAL; + + ieee80211_radar_detected(ar->hw, NULL); + + return count; +} + +static const struct file_operations fops_simulate_radar = { + .write = ath10k_write_simulate_radar, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +#define ATH10K_DFS_STAT(s, p) (\ + len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \ + ar->debug.dfs_stats.p)) + +#define ATH10K_DFS_POOL_STAT(s, p) (\ + len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \ + ar->debug.dfs_pool_stats.p)) + +static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + int retval = 0, len = 0; + const int size = 8000; + struct ath10k *ar = file->private_data; + char *buf; + + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + if (!ar->dfs_detector) { + len += scnprintf(buf + len, size - len, "DFS not enabled\n"); + goto exit; + } + + ar->debug.dfs_pool_stats = + ar->dfs_detector->get_stats(ar->dfs_detector); + + len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); + + ATH10K_DFS_STAT("reported phy errors", phy_errors); + ATH10K_DFS_STAT("pulse events reported", pulses_total); + ATH10K_DFS_STAT("DFS pulses detected", pulses_detected); + ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded); + ATH10K_DFS_STAT("Radars detected", radar_detected); + + len += scnprintf(buf + len, size - len, "Global Pool statistics:\n"); + ATH10K_DFS_POOL_STAT("Pool references", pool_reference); + ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated); + ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error); + ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used); + ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated); + ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error); + ATH10K_DFS_POOL_STAT("Seqs. 
in use", pseq_used); + +exit: + if (len > size) + len = size; + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; +} + +static const struct file_operations fops_dfs_stats = { + .read = ath10k_read_dfs_stats, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_write_pktlog_filter(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u32 filter; + int ret; + + if (kstrtouint_from_user(ubuf, count, 0, &filter)) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ar->pktlog_filter = filter; + ret = count; + goto out; + } + + if (filter == ar->pktlog_filter) { + ret = count; + goto out; + } + + if (filter) { + ret = ath10k_wmi_pdev_pktlog_enable(ar, filter); + if (ret) { + ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", + ar->pktlog_filter, ret); + goto out; + } + } else { + ret = ath10k_wmi_pdev_pktlog_disable(ar); + if (ret) { + ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); + goto out; + } + } + + ar->pktlog_filter = filter; + ret = count; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%08x\n", + ar->pktlog_filter); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_pktlog_filter = { + .read = ath10k_read_pktlog_filter, + .write = ath10k_write_pktlog_filter, + .open = simple_open +}; + +static ssize_t ath10k_write_quiet_period(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u32 period; + + if (kstrtouint_from_user(ubuf, count, 0, &period)) + return -EINVAL; + + if (period < ATH10K_QUIET_PERIOD_MIN) { + ath10k_warn(ar, "Quiet period %u can not be lesser than 25ms\n", + period); + return -EINVAL; + } + mutex_lock(&ar->conf_mutex); + ar->thermal.quiet_period = period; + ath10k_thermal_set_throttling(ar); + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + ar->thermal.quiet_period); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_quiet_period = { + .read = ath10k_read_quiet_period, + .write = ath10k_write_quiet_period, + .open = simple_open +}; + +static ssize_t ath10k_write_btcoex(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + ssize_t ret; + bool val; + u32 pdev_param; + + ret = kstrtobool_from_user(ubuf, count, &val); + if (ret) + return ret; + + if (!ar->coex_support) + return -EOPNOTSUPP; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_RESTARTED) { + ret = -ENETDOWN; + goto exit; + } + + if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) { + ret = count; + goto exit; + } + + pdev_param = ar->wmi.pdev_param->enable_btcoex; + if 
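pktlog_filter above is a bitmask assembled from the ath10k_pktlog_filter flags declared in debug.h further down (RX 0x1, TX 0x2, RCFIND 0x4, RCUPDATE 0x8, DBG_PRINT 0x10, PEER_STATS 0x40); base-0 parsing means plain hex strings are accepted. Enabling RX+TX logging from userspace (hypothetical path):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned int filter = 0x1 | 0x2;	/* PKTLOG_RX | PKTLOG_TX */
		char buf[16];
		int fd, len;

		len = snprintf(buf, sizeof(buf), "0x%x", filter);
		fd = open("/sys/kernel/debug/ieee80211/phy0/ath10k/pktlog_filter",
			  O_WRONLY);
		if (fd < 0)
			return 1;
		if (write(fd, buf, len) < 0)
			return 1;
		return close(fd);
	}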
(test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) { + ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val); + if (ret) { + ath10k_warn(ar, "failed to enable btcoex: %zd\n", ret); + ret = count; + goto exit; + } + } else { + ath10k_info(ar, "restarting firmware due to btcoex change"); + ath10k_core_start_recovery(ar); + } + + if (val) + set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + else + clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags)); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_btcoex = { + .read = ath10k_read_btcoex, + .write = ath10k_write_btcoex, + .open = simple_open +}; + +static ssize_t ath10k_write_enable_extd_tx_stats(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u32 filter; + int ret; + + if (kstrtouint_from_user(ubuf, count, 0, &filter)) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ar->debug.enable_extd_tx_stats = filter; + ret = count; + goto out; + } + + if (filter == ar->debug.enable_extd_tx_stats) { + ret = count; + goto out; + } + + ar->debug.enable_extd_tx_stats = filter; + ret = count; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath10k_read_enable_extd_tx_stats(struct file *file, + char __user *ubuf, + size_t count, loff_t *ppos) + +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%08x\n", + ar->debug.enable_extd_tx_stats); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_enable_extd_tx_stats = { + .read = ath10k_read_enable_extd_tx_stats, + .write = ath10k_write_enable_extd_tx_stats, + .open = simple_open +}; + +static ssize_t ath10k_write_peer_stats(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + ssize_t ret; + bool val; + + ret = kstrtobool_from_user(ubuf, count, &val); + if (ret) + return ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_RESTARTED) { + ret = -ENETDOWN; + goto exit; + } + + if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) { + ret = count; + goto exit; + } + + if (val) + set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); + else + clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); + + ath10k_info(ar, "restarting firmware due to Peer stats change"); + + ath10k_core_start_recovery(ar); + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) + +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags)); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, 
ppos, buf, len); +} + +static const struct file_operations fops_peer_stats = { + .read = ath10k_read_peer_stats, + .write = ath10k_write_peer_stats, + .open = simple_open +}; + +static ssize_t ath10k_debug_fw_checksums_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + size_t len = 0, buf_len = 4096; + ssize_t ret_cnt; + char *buf; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&ar->conf_mutex); + + len += scnprintf(buf + len, buf_len - len, + "firmware-N.bin\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data, + ar->normal_mode_fw.fw_file.firmware->size)); + len += scnprintf(buf + len, buf_len - len, + "athwlan\t\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data, + ar->normal_mode_fw.fw_file.firmware_len)); + len += scnprintf(buf + len, buf_len - len, + "otp\t\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.fw_file.otp_data, + ar->normal_mode_fw.fw_file.otp_len)); + len += scnprintf(buf + len, buf_len - len, + "codeswap\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data, + ar->normal_mode_fw.fw_file.codeswap_len)); + len += scnprintf(buf + len, buf_len - len, + "board-N.bin\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.board->data, + ar->normal_mode_fw.board->size)); + len += scnprintf(buf + len, buf_len - len, + "board\t\t\t%08x\n", + crc32_le(0, ar->normal_mode_fw.board_data, + ar->normal_mode_fw.board_len)); ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); @@ -436,35 +2204,458 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, return ret_cnt; } -static const struct file_operations fops_fw_stats = { - .read = ath10k_read_fw_stats, +static const struct file_operations fops_fw_checksums = { + .read = ath10k_debug_fw_checksums_read, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; +static ssize_t ath10k_sta_tid_stats_mask_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + size_t len; + + len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->sta_tid_stats_mask); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_sta_tid_stats_mask_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + ssize_t ret; + u32 mask; + + ret = kstrtoint_from_user(user_buf, count, 0, &mask); + if (ret) + return ret; + + ar->sta_tid_stats_mask = mask; + + return count; +} + +static const struct file_operations fops_sta_tid_stats_mask = { + .read = ath10k_sta_tid_stats_mask_read, + .write = ath10k_sta_tid_stats_mask_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int ath10k_debug_tpc_stats_final_request(struct ath10k *ar) +{ + int ret; + unsigned long time_left; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->debug.tpc_complete); + + ret = ath10k_wmi_pdev_get_tpc_table_cmdid(ar, WMI_TPC_CONFIG_PARAM); + if (ret) { + ath10k_warn(ar, "failed to request tpc table cmdid: %d\n", ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->debug.tpc_complete, + 1 * HZ); + if (time_left == 0) + return -ETIMEDOUT; + + return 0; +} + +static int ath10k_tpc_stats_final_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + void *buf; + int ret; + + mutex_lock(&ar->conf_mutex); + + if 
(ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE); + if (!buf) { + ret = -ENOMEM; + goto err_unlock; + } + + ret = ath10k_debug_tpc_stats_final_request(ar); + if (ret) { + ath10k_warn(ar, "failed to request tpc stats final: %d\n", + ret); + goto err_free; + } + + ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf); + file->private_data = buf; + + mutex_unlock(&ar->conf_mutex); + return 0; + +err_free: + vfree(buf); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath10k_tpc_stats_final_release(struct inode *inode, + struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static ssize_t ath10k_tpc_stats_final_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + unsigned int len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_tpc_stats_final = { + .open = ath10k_tpc_stats_final_open, + .release = ath10k_tpc_stats_final_release, + .read = ath10k_tpc_stats_final_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_write_warm_hw_reset(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int ret; + bool val; + + if (kstrtobool_from_user(user_buf, count, &val)) + return -EFAULT; + + if (!val) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto exit; + } + + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset, + WMI_RST_MODE_WARM_RESET); + + if (ret) { + ath10k_warn(ar, "failed to enable warm hw reset: %d\n", ret); + goto exit; + } + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static const struct file_operations fops_warm_hw_reset = { + .write = ath10k_write_warm_hw_reset, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static void ath10k_peer_ps_state_disable(void *data, + struct ieee80211_sta *sta) +{ + struct ath10k *ar = data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + + spin_lock_bh(&ar->data_lock); + arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; + spin_unlock_bh(&ar->data_lock); +} + +static ssize_t ath10k_write_ps_state_enable(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int ret; + u32 param; + u8 ps_state_enable; + + if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable)) + return -EINVAL; + + if (ps_state_enable > 1) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->ps_state_enable == ps_state_enable) { + ret = count; + goto exit; + } + + param = ar->wmi.pdev_param->peer_sta_ps_statechg_enable; + ret = ath10k_wmi_pdev_set_param(ar, param, ps_state_enable); + if (ret) { + ath10k_warn(ar, "failed to enable ps_state_enable: %d\n", + ret); + goto exit; + } + ar->ps_state_enable = ps_state_enable; + + if (!ar->ps_state_enable) + ieee80211_iterate_stations_atomic(ar->hw, + ath10k_peer_ps_state_disable, + ar); + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_read_ps_state_enable(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int len = 0; + char buf[32]; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, 
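ps_state_enable accepts only 0 or 1; per the pdev parameter it toggles (peer_sta_ps_statechg_enable), setting it lets firmware report per-peer power-save state changes, and clearing it also resets every known station to WMI_PEER_PS_STATE_DISABLED through the iterator above. Toggling from userspace (hypothetical path):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/ieee80211/phy0/ath10k/"
			      "ps_state_enable", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "1", 1) < 0)	/* values > 1 are rejected */
			return 1;
		return close(fd);
	}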
"%d\n", + ar->ps_state_enable); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ps_state_enable = { + .read = ath10k_read_ps_state_enable, + .write = ath10k_write_ps_state_enable, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_write_reset_htt_stats(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned long reset; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 0, &reset); + if (ret) + return ret; + + if (reset == 0 || reset > 0x1ffff) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + ar->debug.reset_htt_stats = reset; + + ret = ath10k_debug_htt_stats_req(ar); + if (ret) + goto out; + + ar->debug.reset_htt_stats = 0; + ret = count; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static const struct file_operations fops_reset_htt_stats = { + .write = ath10k_write_reset_htt_stats, + .owner = THIS_MODULE, + .open = simple_open, + .llseek = default_llseek, +}; + int ath10k_debug_create(struct ath10k *ar) { + ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN); + if (!ar->debug.cal_data) + return -ENOMEM; + + INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); + INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); + INIT_LIST_HEAD(&ar->debug.fw_stats.peers); + INIT_LIST_HEAD(&ar->debug.fw_stats.peers_extd); + + return 0; +} + +void ath10k_debug_destroy(struct ath10k *ar) +{ + vfree(ar->debug.cal_data); + ar->debug.cal_data = NULL; + + ath10k_debug_fw_stats_reset(ar); + + kfree(ar->debug.tpc_stats); + kfree(ar->debug.tpc_stats_final); +} + +int ath10k_debug_register(struct ath10k *ar) +{ ar->debug.debugfs_phy = debugfs_create_dir("ath10k", ar->hw->wiphy->debugfsdir); + if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) { + if (IS_ERR(ar->debug.debugfs_phy)) + return PTR_ERR(ar->debug.debugfs_phy); - if (!ar->debug.debugfs_phy) return -ENOMEM; + } + + INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork, + ath10k_debug_htt_stats_dwork); - init_completion(&ar->debug.event_stats_compl); + init_completion(&ar->debug.tpc_complete); + init_completion(&ar->debug.fw_stats_complete); - debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar, + debugfs_create_file("fw_stats", 0400, ar->debug.debugfs_phy, ar, &fops_fw_stats); - debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar, + debugfs_create_file("fw_reset_stats", 0400, ar->debug.debugfs_phy, ar, + &fops_fw_reset_stats); + + debugfs_create_file("wmi_services", 0400, ar->debug.debugfs_phy, ar, &fops_wmi_services); + debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar, + &fops_simulate_fw_crash); + + debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar, + &fops_reg_addr); + + debugfs_create_file("reg_value", 0600, ar->debug.debugfs_phy, ar, + &fops_reg_value); + + debugfs_create_file("mem_value", 0600, ar->debug.debugfs_phy, ar, + &fops_mem_value); + + debugfs_create_file("chip_id", 0400, ar->debug.debugfs_phy, ar, + &fops_chip_id); + + debugfs_create_file("htt_stats_mask", 0600, ar->debug.debugfs_phy, ar, + &fops_htt_stats_mask); + + debugfs_create_file("htt_max_amsdu_ampdu", 0600, ar->debug.debugfs_phy, ar, + &fops_htt_max_amsdu_ampdu); + + debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar, + &fops_fw_dbglog); + + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { + debugfs_create_file("cal_data", 0400, 
ar->debug.debugfs_phy, ar, + &fops_cal_data); + + debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar, + &fops_nf_cal_period); + } + + debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar, + &fops_ani_enable); + + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { + debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy, + ar, &fops_simulate_radar); + + debugfs_create_bool("dfs_block_radar_events", 0200, + ar->debug.debugfs_phy, + &ar->dfs_block_radar_events); + + debugfs_create_file("dfs_stats", 0400, ar->debug.debugfs_phy, ar, + &fops_dfs_stats); + } + + debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar, + &fops_pktlog_filter); + + if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map)) + debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar, + &fops_quiet_period); + + debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar, + &fops_tpc_stats); + + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) + debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar, + &fops_btcoex); + + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { + debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar, + &fops_peer_stats); + + debugfs_create_file("enable_extd_tx_stats", 0644, + ar->debug.debugfs_phy, ar, + &fops_enable_extd_tx_stats); + } + + debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar, + &fops_fw_checksums); + + if (IS_ENABLED(CONFIG_MAC80211_DEBUGFS)) + debugfs_create_file("sta_tid_stats_mask", 0600, + ar->debug.debugfs_phy, + ar, &fops_sta_tid_stats_mask); + + if (test_bit(WMI_SERVICE_TPC_STATS_FINAL, ar->wmi.svc_map)) + debugfs_create_file("tpc_stats_final", 0400, + ar->debug.debugfs_phy, ar, + &fops_tpc_stats_final); + + if (test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map)) + debugfs_create_file("warm_hw_reset", 0600, + ar->debug.debugfs_phy, ar, + &fops_warm_hw_reset); + + debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar, + &fops_ps_state_enable); + + debugfs_create_file("reset_htt_stats", 0200, ar->debug.debugfs_phy, ar, + &fops_reset_htt_stats); + return 0; } + +void ath10k_debug_unregister(struct ath10k *ar) +{ + cancel_delayed_work_sync(&ar->debug.htt_stats_dwork); +} + #endif /* CONFIG_ATH10K_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG -void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...) +void __ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask, + const char *fmt, ...) { struct va_format vaf; va_list args; @@ -475,27 +2666,43 @@ void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...) vaf.va = &args; if (ath10k_debug_mask & mask) - ath10k_printk(KERN_DEBUG, "%pV", &vaf); + dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf); - trace_ath10k_log_dbg(mask, &vaf); + trace_ath10k_log_dbg(ar, mask, &vaf); va_end(args); } -EXPORT_SYMBOL(ath10k_dbg); +EXPORT_SYMBOL(__ath10k_dbg); -void ath10k_dbg_dump(enum ath10k_debug_mask mask, +void ath10k_dbg_dump(struct ath10k *ar, + enum ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len) { + char linebuf[256]; + size_t linebuflen; + const void *ptr; + if (ath10k_debug_mask & mask) { if (msg) - ath10k_dbg(mask, "%s\n", msg); - - print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); + __ath10k_dbg(ar, mask, "%s\n", msg); + + for (ptr = buf; (ptr - buf) < len; ptr += 16) { + linebuflen = 0; + linebuflen += scnprintf(linebuf + linebuflen, + sizeof(linebuf) - linebuflen, + "%s%08x: ", + (prefix ? 
prefix : ""), + (unsigned int)(ptr - buf)); + hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1, + linebuf + linebuflen, + sizeof(linebuf) - linebuflen, true); + dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf); + } } /* tracing code doesn't like null strings :/ */ - trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "", + trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "", buf, len); } EXPORT_SYMBOL(ath10k_dbg_dump); diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 168140c54028..0af787f49b33 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -1,18 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. */ #ifndef _DEBUG_H_ @@ -27,64 +17,272 @@ enum ath10k_debug_mask { ATH10K_DBG_HTC = 0x00000004, ATH10K_DBG_HTT = 0x00000008, ATH10K_DBG_MAC = 0x00000010, - ATH10K_DBG_CORE = 0x00000020, + ATH10K_DBG_BOOT = 0x00000020, ATH10K_DBG_PCI_DUMP = 0x00000040, ATH10K_DBG_HTT_DUMP = 0x00000080, ATH10K_DBG_MGMT = 0x00000100, ATH10K_DBG_DATA = 0x00000200, + ATH10K_DBG_BMI = 0x00000400, + ATH10K_DBG_REGULATORY = 0x00000800, + ATH10K_DBG_TESTMODE = 0x00001000, + ATH10K_DBG_WMI_PRINT = 0x00002000, + ATH10K_DBG_PCI_PS = 0x00004000, + ATH10K_DBG_AHB = 0x00008000, + ATH10K_DBG_SDIO = 0x00010000, + ATH10K_DBG_SDIO_DUMP = 0x00020000, + ATH10K_DBG_USB = 0x00040000, + ATH10K_DBG_USB_BULK = 0x00080000, + ATH10K_DBG_SNOC = 0x00100000, + ATH10K_DBG_QMI = 0x00200000, + ATH10K_DBG_STA = 0x00400000, ATH10K_DBG_ANY = 0xffffffff, }; +enum ath10k_pktlog_filter { + ATH10K_PKTLOG_RX = 0x000000001, + ATH10K_PKTLOG_TX = 0x000000002, + ATH10K_PKTLOG_RCFIND = 0x000000004, + ATH10K_PKTLOG_RCUPDATE = 0x000000008, + ATH10K_PKTLOG_DBG_PRINT = 0x000000010, + ATH10K_PKTLOG_PEER_STATS = 0x000000040, + ATH10K_PKTLOG_ANY = 0x00000005f, +}; + +enum ath10k_dbg_aggr_mode { + ATH10K_DBG_AGGR_MODE_AUTO, + ATH10K_DBG_AGGR_MODE_MANUAL, + ATH10K_DBG_AGGR_MODE_MAX, +}; + +/* Types of packet log events */ +enum ath_pktlog_type { + ATH_PKTLOG_TYPE_TX_CTRL = 1, + ATH_PKTLOG_TYPE_TX_STAT, +}; + +struct ath10k_pktlog_hdr { + __le16 flags; + __le16 missed_cnt; + __le16 log_type; /* Type of log information foll this header */ + __le16 size; /* Size of variable length log information in bytes */ + __le32 timestamp; + u8 payload[]; +} __packed; + +/* FIXME: How to calculate the buffer size sanely? 
*/ +#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024) + +#define ATH10K_TX_POWER_MAX_VAL 70 +#define ATH10K_TX_POWER_MIN_VAL 0 + extern unsigned int ath10k_debug_mask; -extern __printf(1, 2) int ath10k_info(const char *fmt, ...); -extern __printf(1, 2) int ath10k_err(const char *fmt, ...); -extern __printf(1, 2) int ath10k_warn(const char *fmt, ...); +__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...); + +void ath10k_debug_print_hwfw_info(struct ath10k *ar); +void ath10k_debug_print_board_info(struct ath10k *ar); +void ath10k_debug_print_boot_info(struct ath10k *ar); +void ath10k_print_driver_info(struct ath10k *ar); #ifdef CONFIG_ATH10K_DEBUGFS +int ath10k_debug_start(struct ath10k *ar); +void ath10k_debug_stop(struct ath10k *ar); int ath10k_debug_create(struct ath10k *ar); -void ath10k_debug_read_service_map(struct ath10k *ar, - void *service_map, - size_t map_size); -void ath10k_debug_read_target_stats(struct ath10k *ar, - struct wmi_stats_event *ev); +void ath10k_debug_destroy(struct ath10k *ar); +int ath10k_debug_register(struct ath10k *ar); +void ath10k_debug_unregister(struct ath10k *ar); +void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb); +void ath10k_debug_tpc_stats_process(struct ath10k *ar, + struct ath10k_tpc_stats *tpc_stats); +void +ath10k_debug_tpc_stats_final_process(struct ath10k *ar, + struct ath10k_tpc_stats_final *tpc_stats); +void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len); + +#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) + +void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data); +int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset); +void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data); + +static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar) +{ + return ar->debug.fw_dbglog_mask; +} + +static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar) +{ + return ar->debug.fw_dbglog_level; +} + +static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar) +{ + return ar->debug.enable_extd_tx_stats; +} + +int ath10k_debug_fw_stats_request(struct ath10k *ar); #else + +static inline int ath10k_debug_start(struct ath10k *ar) +{ + return 0; +} + +static inline void ath10k_debug_stop(struct ath10k *ar) +{ +} + static inline int ath10k_debug_create(struct ath10k *ar) { return 0; } -static inline void ath10k_debug_read_service_map(struct ath10k *ar, - void *service_map, - size_t map_size) +static inline void ath10k_debug_destroy(struct ath10k *ar) +{ +} + +static inline int ath10k_debug_register(struct ath10k *ar) +{ + return 0; +} + +static inline void ath10k_debug_unregister(struct ath10k *ar) +{ +} + +static inline void ath10k_debug_fw_stats_process(struct ath10k *ar, + struct sk_buff *skb) +{ +} + +static inline void ath10k_debug_tpc_stats_process(struct ath10k *ar, + struct ath10k_tpc_stats *tpc_stats) +{ + kfree(tpc_stats); +} + +static inline void +ath10k_debug_tpc_stats_final_process(struct ath10k *ar, + struct ath10k_tpc_stats_final *tpc_stats) +{ + kfree(tpc_stats); +} + +static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, + int len) +{ +} + +static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar) +{ + 
return 0; +} + +static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar) +{ + return 0; +} + +static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar) { + return 0; } -static inline void ath10k_debug_read_target_stats(struct ath10k *ar, - struct wmi_stats_event *ev) +static inline int ath10k_debug_fw_stats_request(struct ath10k *ar) { + return 0; } + +#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) + +#define ath10k_debug_get_et_strings NULL +#define ath10k_debug_get_et_sset_count NULL +#define ath10k_debug_get_et_stats NULL + #endif /* CONFIG_ATH10K_DEBUGFS */ +#ifdef CONFIG_MAC80211_DEBUGFS +void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct dentry *dir); +void ath10k_sta_update_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats); +void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr, + unsigned long num_msdus, + enum ath10k_pkt_rx_err err, + unsigned long unchain_cnt, + unsigned long drop_cnt, + unsigned long drop_cnt_filter, + unsigned long queued_msdus); +void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, + u16 peer_id, u8 tid, + struct htt_rx_indication_mpdu_range *ranges, + int num_ranges); +#else +static inline +void ath10k_sta_update_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) +{ +} + +static inline +void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr, + unsigned long num_msdus, + enum ath10k_pkt_rx_err err, + unsigned long unchain_cnt, + unsigned long drop_cnt, + unsigned long drop_cnt_filter, + unsigned long queued_msdus) +{ +} + +static inline +void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, + u16 peer_id, u8 tid, + struct htt_rx_indication_mpdu_range *ranges, + int num_ranges) +{ +} +#endif /* CONFIG_MAC80211_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG -extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask, - const char *fmt, ...); -void ath10k_dbg_dump(enum ath10k_debug_mask mask, +__printf(3, 4) void __ath10k_dbg(struct ath10k *ar, + enum ath10k_debug_mask mask, + const char *fmt, ...); +void ath10k_dbg_dump(struct ath10k *ar, + enum ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len); #else /* CONFIG_ATH10K_DEBUG */ -static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask, - const char *fmt, ...) +static inline int __ath10k_dbg(struct ath10k *ar, + enum ath10k_debug_mask dbg_mask, + const char *fmt, ...) { return 0; } -static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask, +static inline void ath10k_dbg_dump(struct ath10k *ar, + enum ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len) { } #endif /* CONFIG_ATH10K_DEBUG */ + +/* Avoid calling __ath10k_dbg() if debug_mask is not set and tracing + * disabled. + */ +#define ath10k_dbg(ar, dbg_mask, fmt, ...) \ +do { \ + if ((ath10k_debug_mask & dbg_mask) || \ + trace_ath10k_log_dbg_enabled()) \ + __ath10k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \ +} while (0) #endif /* _DEBUG_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c new file mode 100644 index 000000000000..b9fb192e0b48 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
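When CONFIG_ATH10K_DEBUGFS is off, debug.h compiles the whole layer down to empty inline stubs like the ones above, and the ath10k_dbg() macro defined later in this header only calls __ath10k_dbg() when the runtime mask or tracing is live. A minimal sketch of that compile-time-stub plus runtime-gate pattern (hypothetical names, userspace):

#include <stdio.h>

/* Runtime mask, analogous to ath10k_debug_mask. */
static unsigned int dbg_mask;

#ifdef CONFIG_DBG
static void __dbg_print(unsigned int mask, const char *msg)
{
	printf("dbg: %s\n", msg);
}

/* Gate the call site: the cheap mask test happens inline, the
 * expensive print call is only reached when the mask matches.
 */
#define dbg_print(mask, msg)			\
	do {					\
		if (dbg_mask & (mask))		\
			__dbg_print(mask, msg);	\
	} while (0)
#else
/* Compiled out entirely: call sites stay valid, cost nothing. */
#define dbg_print(mask, msg) do { } while (0)
#endif

int main(void)
{
	dbg_mask = 0x1;
	dbg_print(0x1, "hello");	/* printed only with -DCONFIG_DBG */
	return 0;
}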
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#include "core.h" +#include "wmi-ops.h" +#include "txrx.h" +#include "debug.h" + +static void ath10k_rx_stats_update_amsdu_subfrm(struct ath10k *ar, + struct ath10k_sta_tid_stats *stats, + u32 msdu_count) +{ + if (msdu_count == 1) + stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_1]++; + else if (msdu_count == 2) + stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_2]++; + else if (msdu_count == 3) + stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_3]++; + else if (msdu_count == 4) + stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_4]++; + else if (msdu_count > 4) + stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MORE]++; +} + +static void ath10k_rx_stats_update_ampdu_subfrm(struct ath10k *ar, + struct ath10k_sta_tid_stats *stats, + u32 mpdu_count) +{ + if (mpdu_count <= 10) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_10]++; + else if (mpdu_count <= 20) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_20]++; + else if (mpdu_count <= 30) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_30]++; + else if (mpdu_count <= 40) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_40]++; + else if (mpdu_count <= 50) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_50]++; + else if (mpdu_count <= 60) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_60]++; + else if (mpdu_count > 60) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MORE]++; +} + +void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid, + struct htt_rx_indication_mpdu_range *ranges, + int num_ranges) +{ + struct ath10k_sta *arsta; + struct ath10k_peer *peer; + int i; + + if (tid > IEEE80211_NUM_TIDS || !(ar->sta_tid_stats_mask & BIT(tid))) + return; + + rcu_read_lock(); + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer || !peer->sta) + goto out; + + arsta = (struct ath10k_sta *)peer->sta->drv_priv; + + for (i = 0; i < num_ranges; i++) + ath10k_rx_stats_update_ampdu_subfrm(ar, + &arsta->tid_stats[tid], + ranges[i].mpdu_count); + +out: + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); +} + +void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr, + unsigned long num_msdus, + enum ath10k_pkt_rx_err err, + unsigned long unchain_cnt, + unsigned long drop_cnt, + unsigned long drop_cnt_filter, + unsigned long queued_msdus) +{ + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + struct ieee80211_hdr *hdr; + struct ath10k_sta_tid_stats *stats; + u8 tid = IEEE80211_NUM_TIDS; + bool non_data_frm = false; + + hdr = (struct ieee80211_hdr *)first_hdr; + if (!ieee80211_is_data(hdr->frame_control)) + non_data_frm = true; + + if (ieee80211_is_data_qos(hdr->frame_control)) + tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + + if (!(ar->sta_tid_stats_mask & BIT(tid)) || non_data_frm) + return; + + rcu_read_lock(); + + sta = ieee80211_find_sta_by_ifaddr(ar->hw, hdr->addr2, NULL); + if (!sta) + goto exit; + + arsta = (struct ath10k_sta *)sta->drv_priv; + + spin_lock_bh(&ar->data_lock); + stats = &arsta->tid_stats[tid]; + stats->rx_pkt_from_fw += num_msdus; + stats->rx_pkt_unchained += unchain_cnt; + stats->rx_pkt_drop_chained += drop_cnt; + stats->rx_pkt_drop_filter += drop_cnt_filter; + if (err != ATH10K_PKT_RX_ERR_MAX) + stats->rx_pkt_err[err] += queued_msdus; + stats->rx_pkt_queued_for_mac += queued_msdus; + ath10k_rx_stats_update_amsdu_subfrm(ar, &arsta->tid_stats[tid], + num_msdus); + spin_unlock_bh(&ar->data_lock); + +exit: + rcu_read_unlock(); +} + +static void ath10k_sta_update_extd_stats_rx_duration(struct 
ath10k *ar, + struct ath10k_fw_stats *stats) +{ + struct ath10k_fw_extd_stats_peer *peer; + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + + rcu_read_lock(); + list_for_each_entry(peer, &stats->peers_extd, list) { + sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr, + NULL); + if (!sta) + continue; + arsta = (struct ath10k_sta *)sta->drv_priv; + arsta->rx_duration += (u64)peer->rx_duration; + } + rcu_read_unlock(); +} + +static void ath10k_sta_update_stats_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) +{ + struct ath10k_fw_stats_peer *peer; + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + + rcu_read_lock(); + list_for_each_entry(peer, &stats->peers, list) { + sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr, + NULL); + if (!sta) + continue; + arsta = (struct ath10k_sta *)sta->drv_priv; + arsta->rx_duration += (u64)peer->rx_duration; + } + rcu_read_unlock(); +} + +void ath10k_sta_update_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) +{ + if (stats->extended) + ath10k_sta_update_extd_stats_rx_duration(ar, stats); + else + ath10k_sta_update_stats_rx_duration(ar, stats); +} + +static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + char buf[32]; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n", + (arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ? + "auto" : "manual"); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + u32 aggr_mode; + int ret; + + if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode)) + return -EINVAL; + + if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + if ((ar->state != ATH10K_STATE_ON) || + (aggr_mode == arsta->aggr_mode)) { + ret = count; + goto out; + } + + ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr); + if (ret) { + ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret); + goto out; + } + + arsta->aggr_mode = aggr_mode; +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static const struct file_operations fops_aggr_mode = { + .read = ath10k_dbg_sta_read_aggr_mode, + .write = ath10k_dbg_sta_write_aggr_mode, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_dbg_sta_write_addba(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + u32 tid, buf_size; + int ret; + char buf[64] = {}; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; + + ret = sscanf(buf, "%u %u", &tid, &buf_size); + if (ret != 2) + return -EINVAL; + + /* Valid TID values are 0 through 15 */ + if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + if ((ar->state != ATH10K_STATE_ON) || + 
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) { + ret = count; + goto out; + } + + ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr, + tid, buf_size); + if (ret) { + ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n", + arsta->arvif->vdev_id, sta->addr, tid, buf_size); + } + + ret = count; +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static const struct file_operations fops_addba = { + .write = ath10k_dbg_sta_write_addba, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + u32 tid, status; + int ret; + char buf[64] = {}; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; + + ret = sscanf(buf, "%u %u", &tid, &status); + if (ret != 2) + return -EINVAL; + + /* Valid TID values are 0 through 15 */ + if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + if ((ar->state != ATH10K_STATE_ON) || + (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) { + ret = count; + goto out; + } + + ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr, + tid, status); + if (ret) { + ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n", + arsta->arvif->vdev_id, sta->addr, tid, status); + } + ret = count; +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static const struct file_operations fops_addba_resp = { + .write = ath10k_dbg_sta_write_addba_resp, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_dbg_sta_write_delba(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + u32 tid, initiator, reason; + int ret; + char buf[64] = {}; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; + + ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason); + if (ret != 3) + return -EINVAL; + + /* Valid TID values are 0 through 15 */ + if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + if ((ar->state != ATH10K_STATE_ON) || + (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) { + ret = count; + goto out; + } + + ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr, + tid, initiator, reason); + if (ret) { + ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n", + arsta->arvif->vdev_id, sta->addr, tid, initiator, + reason); + } + ret = count; +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static const struct file_operations fops_delba = { + .write = ath10k_dbg_sta_write_delba, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_dbg_sta_read_peer_debug_trigger(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + char buf[8]; + int len = 0; + + 
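The addba/addba_resp/delba write handlers above all follow the same shape: copy the user buffer with simple_write_to_buffer() into a NUL-terminated stack buffer, parse the fixed fields with sscanf(), and range-check before touching hardware. A minimal userspace sketch of just the parse/validate split (hypothetical names; the locking and WMI call are omitted):

#include <stdio.h>
#include <string.h>

/* Copy at most sizeof(buf) - 1 bytes, NUL-terminate, parse fixed
 * fields, then range-check before acting -- mirrors the addba
 * handler above.
 */
static int parse_addba_cmd(const char *user_input, unsigned int *tid,
			   unsigned int *buf_size)
{
	char buf[64] = {};

	strncpy(buf, user_input, sizeof(buf) - 1);

	if (sscanf(buf, "%u %u", tid, buf_size) != 2)
		return -1;		/* -EINVAL in the driver */

	if (*tid > 15)			/* valid TIDs are 0..15 */
		return -1;

	return 0;
}

int main(void)
{
	unsigned int tid, size;

	if (!parse_addba_cmd("3 64", &tid, &size))
		printf("tid %u buf_size %u\n", tid, size);
	return 0;
}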
mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, + "Write 1 to once trigger the debug logs\n"); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t +ath10k_dbg_sta_write_peer_debug_trigger(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + u8 peer_debug_trigger; + int ret; + + if (kstrtou8_from_user(user_buf, count, 0, &peer_debug_trigger)) + return -EINVAL; + + if (peer_debug_trigger != 1) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto out; + } + + ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr, + ar->wmi.peer_param->debug, peer_debug_trigger); + if (ret) { + ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n", + ret); + goto out; + } +out: + mutex_unlock(&ar->conf_mutex); + return ret ?: count; +} + +static const struct file_operations fops_peer_debug_trigger = { + .open = simple_open, + .read = ath10k_dbg_sta_read_peer_debug_trigger, + .write = ath10k_dbg_sta_write_peer_debug_trigger, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_dbg_sta_read_peer_ps_state(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + char buf[20]; + int len = 0; + + spin_lock_bh(&ar->data_lock); + + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + arsta->peer_ps_state); + + spin_unlock_bh(&ar->data_lock); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_peer_ps_state = { + .open = simple_open, + .read = ath10k_dbg_sta_read_peer_ps_state, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static char *get_err_str(enum ath10k_pkt_rx_err i) +{ + switch (i) { + case ATH10K_PKT_RX_ERR_FCS: + return "fcs_err"; + case ATH10K_PKT_RX_ERR_TKIP: + return "tkip_err"; + case ATH10K_PKT_RX_ERR_CRYPT: + return "crypt_err"; + case ATH10K_PKT_RX_ERR_PEER_IDX_INVAL: + return "peer_idx_inval"; + case ATH10K_PKT_RX_ERR_MAX: + return "unknown"; + } + + return "unknown"; +} + +static char *get_num_ampdu_subfrm_str(enum ath10k_ampdu_subfrm_num i) +{ + switch (i) { + case ATH10K_AMPDU_SUBFRM_NUM_10: + return "up to 10"; + case ATH10K_AMPDU_SUBFRM_NUM_20: + return "11-20"; + case ATH10K_AMPDU_SUBFRM_NUM_30: + return "21-30"; + case ATH10K_AMPDU_SUBFRM_NUM_40: + return "31-40"; + case ATH10K_AMPDU_SUBFRM_NUM_50: + return "41-50"; + case ATH10K_AMPDU_SUBFRM_NUM_60: + return "51-60"; + case ATH10K_AMPDU_SUBFRM_NUM_MORE: + return ">60"; + case ATH10K_AMPDU_SUBFRM_NUM_MAX: + return "0"; + } + + return "0"; +} + +static char *get_num_amsdu_subfrm_str(enum ath10k_amsdu_subfrm_num i) +{ + switch (i) { + case ATH10K_AMSDU_SUBFRM_NUM_1: + return "1"; + case ATH10K_AMSDU_SUBFRM_NUM_2: + return "2"; + case ATH10K_AMSDU_SUBFRM_NUM_3: + return "3"; + case ATH10K_AMSDU_SUBFRM_NUM_4: + return "4"; + case ATH10K_AMSDU_SUBFRM_NUM_MORE: + return ">4"; + case ATH10K_AMSDU_SUBFRM_NUM_MAX: + return "0"; + } + + return "0"; +} + +#define PRINT_TID_STATS(_field, _tabs) \ + do { \ + int k = 0; \ + for (j = 0; j <= IEEE80211_NUM_TIDS; j++) { \ + if (ar->sta_tid_stats_mask & 
BIT(j)) { \ + len += scnprintf(buf + len, buf_len - len, \ + "[%02d] %-10lu ", \ + j, stats[j]._field); \ + k++; \ + if (k % 8 == 0) { \ + len += scnprintf(buf + len, \ + buf_len - len, "\n"); \ + len += scnprintf(buf + len, \ + buf_len - len, \ + _tabs); \ + } \ + } \ + } \ + len += scnprintf(buf + len, buf_len - len, "\n"); \ + } while (0) + +static ssize_t ath10k_dbg_sta_read_tid_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + struct ath10k_sta_tid_stats *stats = arsta->tid_stats; + size_t len = 0, buf_len = 1048 * IEEE80211_NUM_TIDS; + char *buf; + int i, j; + ssize_t ret; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + + len += scnprintf(buf + len, buf_len - len, + "\n\t\tDriver Rx pkt stats per tid, ([tid] count)\n"); + len += scnprintf(buf + len, buf_len - len, + "\t\t------------------------------------------\n"); + len += scnprintf(buf + len, buf_len - len, "MSDUs from FW\t\t\t"); + PRINT_TID_STATS(rx_pkt_from_fw, "\t\t\t\t"); + + len += scnprintf(buf + len, buf_len - len, "MSDUs unchained\t\t\t"); + PRINT_TID_STATS(rx_pkt_unchained, "\t\t\t\t"); + + len += scnprintf(buf + len, buf_len - len, + "MSDUs locally dropped:chained\t"); + PRINT_TID_STATS(rx_pkt_drop_chained, "\t\t\t\t"); + + len += scnprintf(buf + len, buf_len - len, + "MSDUs locally dropped:filtered\t"); + PRINT_TID_STATS(rx_pkt_drop_filter, "\t\t\t\t"); + + len += scnprintf(buf + len, buf_len - len, + "MSDUs queued for mac80211\t"); + PRINT_TID_STATS(rx_pkt_queued_for_mac, "\t\t\t\t"); + + for (i = 0; i < ATH10K_PKT_RX_ERR_MAX; i++) { + len += scnprintf(buf + len, buf_len - len, + "MSDUs with error:%s\t", get_err_str(i)); + PRINT_TID_STATS(rx_pkt_err[i], "\t\t\t\t"); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + for (i = 0; i < ATH10K_AMPDU_SUBFRM_NUM_MAX; i++) { + len += scnprintf(buf + len, buf_len - len, + "A-MPDU num subframes %s\t", + get_num_ampdu_subfrm_str(i)); + PRINT_TID_STATS(rx_pkt_ampdu[i], "\t\t\t\t"); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + for (i = 0; i < ATH10K_AMSDU_SUBFRM_NUM_MAX; i++) { + len += scnprintf(buf + len, buf_len - len, + "A-MSDU num subframes %s\t\t", + get_num_amsdu_subfrm_str(i)); + PRINT_TID_STATS(rx_pkt_amsdu[i], "\t\t\t\t"); + } + + spin_unlock_bh(&ar->data_lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static const struct file_operations fops_tid_stats_dump = { + .open = simple_open, + .read = ath10k_dbg_sta_read_tid_stats, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + struct ath10k_htt_data_stats *stats; + const char *str_name[ATH10K_STATS_TYPE_MAX] = {"succ", "fail", + "retry", "ampdu"}; + const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"}; + int len = 0, i, j, k, retval = 0; + const int size = 16 * 4096; + char *buf; + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&ar->conf_mutex); + + if (!arsta->tx_stats) { + ath10k_warn(ar, "failed to 
get tx stats"); + mutex_unlock(&ar->conf_mutex); + kfree(buf); + return 0; + } + + spin_lock_bh(&ar->data_lock); + for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) { + for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) { + stats = &arsta->tx_stats->stats[k]; + len += scnprintf(buf + len, size - len, "%s_%s\n", + str_name[k], + str[j]); + len += scnprintf(buf + len, size - len, + " VHT MCS %s\n", + str[j]); + for (i = 0; i < ATH10K_VHT_MCS_NUM; i++) + len += scnprintf(buf + len, size - len, + " %llu ", + stats->vht[j][i]); + len += scnprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, " HT MCS %s\n", + str[j]); + for (i = 0; i < ATH10K_HT_MCS_NUM; i++) + len += scnprintf(buf + len, size - len, + " %llu ", stats->ht[j][i]); + len += scnprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, + " BW %s (20,5,10,40,80,160 MHz)\n", str[j]); + len += scnprintf(buf + len, size - len, + " %llu %llu %llu %llu %llu %llu\n", + stats->bw[j][0], stats->bw[j][1], + stats->bw[j][2], stats->bw[j][3], + stats->bw[j][4], stats->bw[j][5]); + len += scnprintf(buf + len, size - len, + " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]); + len += scnprintf(buf + len, size - len, + " %llu %llu %llu %llu\n", + stats->nss[j][0], stats->nss[j][1], + stats->nss[j][2], stats->nss[j][3]); + len += scnprintf(buf + len, size - len, + " GI %s (LGI,SGI)\n", + str[j]); + len += scnprintf(buf + len, size - len, " %llu %llu\n", + stats->gi[j][0], stats->gi[j][1]); + len += scnprintf(buf + len, size - len, + " legacy rate %s (1,2 ... Mbps)\n ", + str[j]); + for (i = 0; i < ATH10K_LEGACY_NUM; i++) + len += scnprintf(buf + len, size - len, "%llu ", + stats->legacy[j][i]); + len += scnprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, + " Rate table %s (1,2 ... 
Mbps)\n ", + str[j]); + for (i = 0; i < ATH10K_RATE_TABLE_NUM; i++) { + len += scnprintf(buf + len, size - len, "%llu ", + stats->rate_table[j][i]); + if (!((i + 1) % 8)) + len += + scnprintf(buf + len, size - len, "\n "); + } + } + } + + len += scnprintf(buf + len, size - len, + "\nTX duration\n %llu usecs\n", + arsta->tx_stats->tx_duration); + len += scnprintf(buf + len, size - len, + "BA fails\n %llu\n", arsta->tx_stats->ba_fails); + len += scnprintf(buf + len, size - len, + "ack fails\n %llu\n", arsta->tx_stats->ack_fails); + spin_unlock_bh(&ar->data_lock); + + if (len > size) + len = size; + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + mutex_unlock(&ar->conf_mutex); + return retval; +} + +static const struct file_operations fops_tx_stats = { + .read = ath10k_dbg_sta_dump_tx_stats, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct dentry *dir) +{ + struct ath10k *ar = hw->priv; + + debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode); + debugfs_create_file("addba", 0200, dir, sta, &fops_addba); + debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp); + debugfs_create_file("delba", 0200, dir, sta, &fops_delba); + debugfs_create_file("peer_debug_trigger", 0600, dir, sta, + &fops_peer_debug_trigger); + debugfs_create_file("dump_tid_stats", 0400, dir, sta, + &fops_tid_stats_dump); + + if (ath10k_peer_stats_enabled(ar) && + ath10k_debug_is_extd_tx_stats_enabled(ar)) + debugfs_create_file("tx_stats", 0400, dir, sta, + &fops_tx_stats); + debugfs_create_file("peer_ps_state", 0400, dir, sta, + &fops_peer_ps_state); +} diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 73a24d44d1b4..9e45fd9073a6 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc. 
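The tx-stats dump above grows one large buffer with the kernel's len += scnprintf(buf + len, size - len, ...) idiom: unlike snprintf(), scnprintf() returns the number of bytes actually written (never more than size - 1), so len can never step past the buffer. A small userspace model with a shim standing in for the kernel function (shim name hypothetical):

#include <stdio.h>
#include <stdarg.h>

/* Userspace stand-in for the kernel's scnprintf(): returns bytes
 * actually written, capped at size - 1, so "len +=" stays safe.
 */
static int scnprintf_shim(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (!size)
		return 0;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (n < 0)
		return 0;
	return n < (int)size ? n : (int)size - 1;
}

int main(void)
{
	char buf[64];
	size_t len = 0;
	int i;

	for (i = 0; i < 4; i++)
		len += scnprintf_shim(buf + len, sizeof(buf) - len,
				      "stat[%d]=%d\n", i, i * 10);

	fputs(buf, stdout);
	return 0;
}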
*/ #ifndef _HIF_H_ @@ -20,23 +9,34 @@ #include <linux/kernel.h> #include "core.h" +#include "bmi.h" +#include "debug.h" -struct ath10k_hif_cb { - int (*tx_completion)(struct ath10k *ar, - struct sk_buff *wbuf, - unsigned transfer_id); - int (*rx_completion)(struct ath10k *ar, - struct sk_buff *wbuf, - u8 pipe_id); +/* Types of fw logging mode */ +enum ath_dbg_mode { + ATH10K_ENABLE_FW_LOG_DIAG, + ATH10K_ENABLE_FW_LOG_CE, +}; + +struct ath10k_hif_sg_item { + u16 transfer_id; + void *transfer_context; /* NULL = tx completion callback not called */ + void *vaddr; /* for debugging mostly */ + dma_addr_t paddr; + u16 len; }; struct ath10k_hif_ops { - /* Send the head of a buffer to HIF for transmission to the target. */ - int (*send_head)(struct ath10k *ar, u8 pipe_id, - unsigned int transfer_id, - unsigned int nbytes, - struct sk_buff *buf); + /* send a scatter-gather list to the target */ + int (*tx_sg)(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items); + /* read firmware memory through the diagnose interface */ + int (*diag_read)(struct ath10k *ar, u32 address, void *buf, + size_t buf_len); + + int (*diag_write)(struct ath10k *ar, u32 address, const void *data, + int nbytes); /* * API to handle HIF-specific BMI message exchanges, this API is * synchronous and only allowed to be called from a context that @@ -46,13 +46,20 @@ struct ath10k_hif_ops { void *request, u32 request_len, void *response, u32 *response_len); + /* Post BMI phase, after FW is loaded. Starts regular operation */ int (*start)(struct ath10k *ar); + /* Clean up what start() did. This does not revert to BMI phase. If + * desired so, call power_down() and power_up() + */ void (*stop)(struct ath10k *ar); + int (*start_post)(struct ath10k *ar); + + int (*get_htt_tx_complete)(struct ath10k *ar); + int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe, - int *ul_is_polled, int *dl_is_polled); + u8 *ul_pipe, u8 *dl_pipe); void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe); @@ -66,19 +73,52 @@ struct ath10k_hif_ops { */ void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force); - void (*init)(struct ath10k *ar, - struct ath10k_hif_cb *callbacks); - u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id); + + u32 (*read32)(struct ath10k *ar, u32 address); + + void (*write32)(struct ath10k *ar, u32 address, u32 value); + + /* Power up the device and enter BMI transfer mode for FW download */ + int (*power_up)(struct ath10k *ar, enum ath10k_firmware_mode fw_mode); + + /* Power down the device and free up resources. 
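The reworked ath10k_hif_ops table above is the bus abstraction: PCI, SDIO, USB and SNOC each fill in their own function pointers, and the inline ath10k_hif_*() wrappers that follow NULL-check the optional ops and report -EOPNOTSUPP. A minimal model of that dispatch pattern (userspace, hypothetical names):

#include <stdio.h>
#include <errno.h>

/* Mandatory ops are called directly; optional ops are NULL-checked
 * so each bus backend only implements what its hardware supports.
 */
struct hif_ops {
	int (*suspend)(void);	/* optional */
};

static int hif_suspend(const struct hif_ops *ops)
{
	if (!ops->suspend)
		return -EOPNOTSUPP;
	return ops->suspend();
}

static int pci_suspend(void)
{
	return 0;
}

int main(void)
{
	struct hif_ops pci = { .suspend = pci_suspend };
	struct hif_ops usb = { 0 };	/* no suspend support */

	printf("pci: %d usb: %d\n", hif_suspend(&pci), hif_suspend(&usb));
	return 0;
}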
stop() must be called + * before this if start() was called earlier + */ + void (*power_down)(struct ath10k *ar); + + int (*suspend)(struct ath10k *ar); + int (*resume)(struct ath10k *ar); + + /* fetch calibration data from target eeprom */ + int (*fetch_cal_eeprom)(struct ath10k *ar, void **data, + size_t *data_len); + + int (*get_target_info)(struct ath10k *ar, + struct bmi_target_info *target_info); + int (*set_target_log_mode)(struct ath10k *ar, u8 fw_log_mode); }; +static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, + int n_items) +{ + return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items); +} + +static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len) +{ + return ar->hif.ops->diag_read(ar, address, buf, buf_len); +} -static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id, - unsigned int transfer_id, - unsigned int nbytes, - struct sk_buff *buf) +static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address, + const void *data, int nbytes) { - return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf); + if (!ar->hif.ops->diag_write) + return -EOPNOTSUPP; + + return ar->hif.ops->diag_write(ar, address, data, nbytes); } static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar, @@ -99,15 +139,26 @@ static inline void ath10k_hif_stop(struct ath10k *ar) return ar->hif.ops->stop(ar); } +static inline int ath10k_hif_start_post(struct ath10k *ar) +{ + if (ar->hif.ops->start_post) + return ar->hif.ops->start_post(ar); + return 0; +} + +static inline int ath10k_hif_get_htt_tx_complete(struct ath10k *ar) +{ + if (ar->hif.ops->get_htt_tx_complete) + return ar->hif.ops->get_htt_tx_complete(ar); + return 0; +} + static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe, - int *ul_is_polled, - int *dl_is_polled) + u8 *ul_pipe, u8 *dl_pipe) { return ar->hif.ops->map_service_to_pipe(ar, service_id, - ul_pipe, dl_pipe, - ul_is_polled, dl_is_polled); + ul_pipe, dl_pipe); } static inline void ath10k_hif_get_default_pipe(struct ath10k *ar, @@ -119,13 +170,8 @@ static inline void ath10k_hif_get_default_pipe(struct ath10k *ar, static inline void ath10k_hif_send_complete_check(struct ath10k *ar, u8 pipe_id, int force) { - ar->hif.ops->send_complete_check(ar, pipe_id, force); -} - -static inline void ath10k_hif_init(struct ath10k *ar, - struct ath10k_hif_cb *callbacks) -{ - ar->hif.ops->init(ar, callbacks); + if (ar->hif.ops->send_complete_check) + ar->hif.ops->send_complete_check(ar, pipe_id, force); } static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar, @@ -134,4 +180,79 @@ static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar, return ar->hif.ops->get_free_queue_number(ar, pipe_id); } +static inline int ath10k_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) +{ + return ar->hif.ops->power_up(ar, fw_mode); +} + +static inline void ath10k_hif_power_down(struct ath10k *ar) +{ + ar->hif.ops->power_down(ar); +} + +static inline int ath10k_hif_suspend(struct ath10k *ar) +{ + if (!ar->hif.ops->suspend) + return -EOPNOTSUPP; + + return ar->hif.ops->suspend(ar); +} + +static inline int ath10k_hif_resume(struct ath10k *ar) +{ + if (!ar->hif.ops->resume) + return -EOPNOTSUPP; + + return ar->hif.ops->resume(ar); +} + +static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address) +{ + if (!ar->hif.ops->read32) { + ath10k_warn(ar, "hif read32 not supported\n"); + return 
0xdeaddead; + } + + return ar->hif.ops->read32(ar, address); +} + +static inline void ath10k_hif_write32(struct ath10k *ar, + u32 address, u32 data) +{ + if (!ar->hif.ops->write32) { + ath10k_warn(ar, "hif write32 not supported\n"); + return; + } + + ar->hif.ops->write32(ar, address, data); +} + +static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar, + void **data, + size_t *data_len) +{ + if (!ar->hif.ops->fetch_cal_eeprom) + return -EOPNOTSUPP; + + return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len); +} + +static inline int ath10k_hif_get_target_info(struct ath10k *ar, + struct bmi_target_info *tgt_info) +{ + if (!ar->hif.ops->get_target_info) + return -EOPNOTSUPP; + + return ar->hif.ops->get_target_info(ar, tgt_info); +} + +static inline int ath10k_hif_set_target_log_mode(struct ath10k *ar, + u8 fw_log_mode) +{ + if (!ar->hif.ops->set_target_log_mode) + return -EOPNOTSUPP; + + return ar->hif.ops->set_target_log_mode(ar, fw_log_mode); +} #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 74363c949392..ce9b248c12dc 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -1,20 +1,13 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include <linux/export.h> + #include "core.h" #include "hif.h" #include "debug.h" @@ -23,16 +16,6 @@ /* Send */ /********/ -static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep, - int force) -{ - /* - * Check whether HIF has any prior sends that have finished, - * have not had the post-processing done. - */ - ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force); -} - static void ath10k_htc_control_tx_complete(struct ath10k *ar, struct sk_buff *skb) { @@ -45,10 +28,8 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) struct ath10k_skb_cb *skb_cb; skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE); - if (!skb) { - ath10k_warn("Unable to allocate ctrl skb\n"); + if (!skb) return NULL; - } skb_reserve(skb, 20); /* FIXME: why 20 bytes? 
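ath10k_htc_build_tx_ctrl_skb() reserves headroom up front precisely so that ath10k_htc_send() can later skb_push() the HTC header in front of the payload without copying it. A toy model of the reserve/push pointer bookkeeping (plain buffer instead of a real sk_buff; all names hypothetical):

#include <stdio.h>
#include <string.h>

/* Toy model of skb headroom: data starts past a reserved gap, and
 * "push" moves the data pointer backwards into that gap to prepend
 * a header without moving the payload.
 */
struct toy_skb {
	unsigned char buf[64];
	unsigned char *data;	/* current start of valid data */
	size_t len;
};

static void toy_reserve(struct toy_skb *skb, size_t headroom)
{
	skb->data = skb->buf + headroom;
	skb->len = 0;
}

static unsigned char *toy_push(struct toy_skb *skb, size_t n)
{
	skb->data -= n;		/* real skb_push() checks headroom */
	skb->len += n;
	return skb->data;
}

int main(void)
{
	struct toy_skb skb;

	toy_reserve(&skb, 20);			/* like skb_reserve(skb, 20) */
	memcpy(skb.data, "payload", 7);
	skb.len = 7;

	memcpy(toy_push(&skb, 4), "HDR:", 4);	/* prepend a header */
	printf("%.*s\n", (int)skb.len, skb.data);
	return 0;
}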
*/ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); @@ -56,46 +37,55 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) skb_cb = ATH10K_SKB_CB(skb); memset(skb_cb, 0, sizeof(*skb_cb)); - ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); + ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); return skb; } static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc, struct sk_buff *skb) { - ath10k_skb_unmap(htc->ar->dev, skb); + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); + + if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) + dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); skb_pull(skb, sizeof(struct ath10k_htc_hdr)); } -static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, - struct sk_buff *skb) +void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, + struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, + struct ath10k *ar = ep->htc->ar; + struct ath10k_htc_hdr *hdr; + + ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, ep->eid, skb); + /* A corner case where the copy completion is reaching to host but still + * copy engine is processing it due to which host unmaps corresponding + * memory and causes SMMU fault, hence as workaround adding delay + * the unmapping memory to avoid SMMU faults. + */ + if (ar->hw_params.delay_unmap_buffer && + ep->ul_pipe_id == 3) + mdelay(2); + + hdr = (struct ath10k_htc_hdr *)skb->data; ath10k_htc_restore_tx_skb(ep->htc, skb); if (!ep->ep_ops.ep_tx_complete) { - ath10k_warn("no tx handler for eid %d\n", ep->eid); + ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid); dev_kfree_skb_any(skb); return; } - ep->ep_ops.ep_tx_complete(ep->htc->ar, skb); -} - -/* assumes tx_lock is held */ -static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep) -{ - if (!ep->tx_credit_flow_enabled) - return false; - if (ep->tx_credits >= ep->tx_credits_per_max_message) - return false; + if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) { + dev_kfree_skb_any(skb); + return; + } - ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n", - ep->eid); - return true; + ep->ep_ops.ep_tx_complete(ep->htc->ar, skb); } +EXPORT_SYMBOL(ath10k_htc_notify_tx_completion); static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb) @@ -103,212 +93,154 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, struct ath10k_htc_hdr *hdr; hdr = (struct ath10k_htc_hdr *)skb->data; - memset(hdr, 0, sizeof(*hdr)); + memset(hdr, 0, sizeof(struct ath10k_htc_hdr)); hdr->eid = ep->eid; hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr)); + hdr->flags = 0; + if (ep->tx_credit_flow_enabled && !ep->bundle_tx) + hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE; spin_lock_bh(&ep->htc->tx_lock); hdr->seq_no = ep->seq_no++; - - if (ath10k_htc_ep_need_credit_update(ep)) - hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE; - spin_unlock_bh(&ep->htc->tx_lock); } -static int ath10k_htc_issue_skb(struct ath10k_htc *htc, - struct ath10k_htc_ep *ep, - struct sk_buff *skb, - u8 credits) +static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep, + unsigned int len, + bool consume) { - struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); - int ret; - - ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, - ep->eid, skb); - - ath10k_htc_prepare_tx_skb(ep, skb); - - ret = ath10k_skb_map(htc->ar->dev, skb); - if (ret) - goto err; - - ret = ath10k_hif_send_head(htc->ar, - ep->ul_pipe_id, - ep->eid, - skb->len, - 
skb); - if (unlikely(ret)) - goto err; + struct ath10k_htc *htc = ep->htc; + struct ath10k *ar = htc->ar; + enum ath10k_htc_ep_id eid = ep->eid; + int credits, ret = 0; - return 0; -err: - ath10k_warn("HTC issue failed: %d\n", ret); + if (!ep->tx_credit_flow_enabled) + return 0; + credits = DIV_ROUND_UP(len, ep->tx_credit_size); spin_lock_bh(&htc->tx_lock); - ep->tx_credits += credits; - spin_unlock_bh(&htc->tx_lock); - /* this is the simplest way to handle out-of-resources for non-credit - * based endpoints. credit based endpoints can still get -ENOSR, but - * this is highly unlikely as credit reservation should prevent that */ - if (ret == -ENOSR) { - spin_lock_bh(&htc->tx_lock); - __skb_queue_head(&ep->tx_queue, skb); - spin_unlock_bh(&htc->tx_lock); - - return ret; + if (ep->tx_credits < credits) { + ath10k_dbg(ar, ATH10K_DBG_HTC, + "htc insufficient credits ep %d required %d available %d consume %d\n", + eid, credits, ep->tx_credits, consume); + ret = -EAGAIN; + goto unlock; } - skb_cb->is_aborted = true; - ath10k_htc_notify_tx_completion(ep, skb); - - return ret; -} - -static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc, - struct ath10k_htc_ep *ep, - u8 *credits) -{ - struct sk_buff *skb; - struct ath10k_skb_cb *skb_cb; - int credits_required; - int remainder; - unsigned int transfer_len; - - lockdep_assert_held(&htc->tx_lock); - - skb = __skb_dequeue(&ep->tx_queue); - if (!skb) - return NULL; - - skb_cb = ATH10K_SKB_CB(skb); - transfer_len = skb->len; - - if (likely(transfer_len <= htc->target_credit_size)) { - credits_required = 1; - } else { - /* figure out how many credits this message requires */ - credits_required = transfer_len / htc->target_credit_size; - remainder = transfer_len % htc->target_credit_size; - - if (remainder) - credits_required++; + if (consume) { + ep->tx_credits -= credits; + ath10k_dbg(ar, ATH10K_DBG_HTC, + "htc ep %d consumed %d credits total %d\n", + eid, credits, ep->tx_credits); } - ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n", - credits_required, ep->tx_credits); - - if (ep->tx_credits < credits_required) { - __skb_queue_head(&ep->tx_queue, skb); - return NULL; - } - - ep->tx_credits -= credits_required; - *credits = credits_required; - return skb; +unlock: + spin_unlock_bh(&htc->tx_lock); + return ret; } -static void ath10k_htc_send_work(struct work_struct *work) +static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len) { - struct ath10k_htc_ep *ep = container_of(work, - struct ath10k_htc_ep, send_work); struct ath10k_htc *htc = ep->htc; - struct sk_buff *skb; - u8 credits = 0; - int ret; - - while (true) { - if (ep->ul_is_polled) - ath10k_htc_send_complete_check(ep, 0); + struct ath10k *ar = htc->ar; + enum ath10k_htc_ep_id eid = ep->eid; + int credits; - spin_lock_bh(&htc->tx_lock); - if (ep->tx_credit_flow_enabled) - skb = ath10k_htc_get_skb_credit_based(htc, ep, - &credits); - else - skb = __skb_dequeue(&ep->tx_queue); - spin_unlock_bh(&htc->tx_lock); + if (!ep->tx_credit_flow_enabled) + return; - if (!skb) - break; + credits = DIV_ROUND_UP(len, ep->tx_credit_size); + spin_lock_bh(&htc->tx_lock); + ep->tx_credits += credits; + ath10k_dbg(ar, ATH10K_DBG_HTC, + "htc ep %d reverted %d credits back total %d\n", + eid, credits, ep->tx_credits); + spin_unlock_bh(&htc->tx_lock); - ret = ath10k_htc_issue_skb(htc, ep, skb, credits); - if (ret == -ENOSR) - break; - } + if (ep->ep_ops.ep_tx_credits) + ep->ep_ops.ep_tx_credits(htc->ar); } int ath10k_htc_send(struct ath10k_htc *htc, enum 
ath10k_htc_ep_id eid, struct sk_buff *skb) { + struct ath10k *ar = htc->ar; struct ath10k_htc_ep *ep = &htc->endpoint[eid]; + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); + struct ath10k_hif_sg_item sg_item; + struct device *dev = htc->ar->dev; + int ret; + unsigned int skb_len; + + if (htc->ar->state == ATH10K_STATE_WEDGED) + return -ECOMM; if (eid >= ATH10K_HTC_EP_COUNT) { - ath10k_warn("Invalid endpoint id: %d\n", eid); + ath10k_warn(ar, "Invalid endpoint id: %d\n", eid); return -ENOENT; } skb_push(skb, sizeof(struct ath10k_htc_hdr)); - spin_lock_bh(&htc->tx_lock); - __skb_queue_tail(&ep->tx_queue, skb); - spin_unlock_bh(&htc->tx_lock); - - queue_work(htc->ar->workqueue, &ep->send_work); - return 0; -} + skb_len = skb->len; + ret = ath10k_htc_consume_credit(ep, skb_len, true); + if (ret) + goto err_pull; -static int ath10k_htc_tx_completion_handler(struct ath10k *ar, - struct sk_buff *skb, - unsigned int eid) -{ - struct ath10k_htc *htc = ar->htc; - struct ath10k_htc_ep *ep = &htc->endpoint[eid]; - bool stopping; + ath10k_htc_prepare_tx_skb(ep, skb); - ath10k_htc_notify_tx_completion(ep, skb); - /* the skb now belongs to the completion handler */ + skb_cb->eid = eid; + if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) { + skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, + DMA_TO_DEVICE); + ret = dma_mapping_error(dev, skb_cb->paddr); + if (ret) { + ret = -EIO; + goto err_credits; + } + } - spin_lock_bh(&htc->tx_lock); - stopping = htc->stopping; - spin_unlock_bh(&htc->tx_lock); + sg_item.transfer_id = ep->eid; + sg_item.transfer_context = skb; + sg_item.vaddr = skb->data; + sg_item.paddr = skb_cb->paddr; + sg_item.len = skb->len; - if (!ep->tx_credit_flow_enabled && !stopping) - /* - * note: when using TX credit flow, the re-checking of - * queues happens when credits flow back from the target. 
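The consume/release pair above replaces the old per-skb queue with direct credit accounting: a message of len bytes costs DIV_ROUND_UP(len, tx_credit_size) credits, and a failed send gives exactly that cost back through the err_credits unwind in ath10k_htc_send(). A minimal sketch of the arithmetic (userspace, hypothetical names):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Sketch of ath10k_htc_consume_credit()/ath10k_htc_release_credit():
 * cost is the ceiling of len over the per-credit transfer size.
 */
struct ep {
	int tx_credits;
	int tx_credit_size;
};

static int consume_credit(struct ep *ep, unsigned int len)
{
	int credits = DIV_ROUND_UP(len, (unsigned int)ep->tx_credit_size);

	if (ep->tx_credits < credits)
		return -1;	/* -EAGAIN in the driver */

	ep->tx_credits -= credits;
	return credits;
}

static void release_credit(struct ep *ep, int credits)
{
	ep->tx_credits += credits;
}

int main(void)
{
	struct ep ep = { .tx_credits = 10, .tx_credit_size = 1792 };
	int used = consume_credit(&ep, 4000);	/* needs 3 credits */

	printf("used %d, left %d\n", used, ep.tx_credits);
	if (used > 0)
		release_credit(&ep, used);	/* pretend the send failed */
	printf("after release: %d\n", ep.tx_credits);
	return 0;
}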
- * in the non-TX credit case, we recheck after the packet - * completes - */ - queue_work(ar->workqueue, &ep->send_work); + ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1); + if (ret) + goto err_unmap; return 0; + +err_unmap: + if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) + dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); +err_credits: + ath10k_htc_release_credit(ep, skb_len); +err_pull: + skb_pull(skb, sizeof(struct ath10k_htc_hdr)); + return ret; } -/* flush endpoint TX queue */ -static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc, - struct ath10k_htc_ep *ep) +void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb) { - struct sk_buff *skb; + struct ath10k_htc *htc = &ar->htc; struct ath10k_skb_cb *skb_cb; + struct ath10k_htc_ep *ep; - spin_lock_bh(&htc->tx_lock); - for (;;) { - skb = __skb_dequeue(&ep->tx_queue); - if (!skb) - break; + if (WARN_ON_ONCE(!skb)) + return; - skb_cb = ATH10K_SKB_CB(skb); - skb_cb->is_aborted = true; - ath10k_htc_notify_tx_completion(ep, skb); - } - spin_unlock_bh(&htc->tx_lock); + skb_cb = ATH10K_SKB_CB(skb); + ep = &htc->endpoint[skb_cb->eid]; - cancel_work_sync(&ep->send_work); + ath10k_htc_notify_tx_completion(ep, skb); + /* the skb now belongs to the completion handler */ } +EXPORT_SYMBOL(ath10k_htc_tx_completion_handler); /***********/ /* Receive */ @@ -320,11 +252,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc, int len, enum ath10k_htc_ep_id eid) { + struct ath10k *ar = htc->ar; struct ath10k_htc_ep *ep; int i, n_reports; if (len % sizeof(*report)) - ath10k_warn("Uneven credit report len %d", len); + ath10k_warn(ar, "Uneven credit report len %d", len); n_reports = len / sizeof(*report); @@ -333,23 +266,94 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc, if (report->eid >= ATH10K_HTC_EP_COUNT) break; - ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n", - report->eid, report->credits); - ep = &htc->endpoint[report->eid]; ep->tx_credits += report->credits; - if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue)) - queue_work(htc->ar->workqueue, &ep->send_work); + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n", + report->eid, report->credits, ep->tx_credits); + + if (ep->ep_ops.ep_tx_credits) { + spin_unlock_bh(&htc->tx_lock); + ep->ep_ops.ep_tx_credits(htc->ar); + spin_lock_bh(&htc->tx_lock); + } } spin_unlock_bh(&htc->tx_lock); } -static int ath10k_htc_process_trailer(struct ath10k_htc *htc, - u8 *buffer, - int length, - enum ath10k_htc_ep_id src_eid) +static int +ath10k_htc_process_lookahead(struct ath10k_htc *htc, + const struct ath10k_htc_lookahead_report *report, + int len, + enum ath10k_htc_ep_id eid, + void *next_lookaheads, + int *next_lookaheads_len) { + struct ath10k *ar = htc->ar; + + /* Invalid lookahead flags are actually transmitted by + * the target in the HTC control message. 
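ath10k_htc_process_trailer() below iterates variable-length (id, len) records and bounds-checks each record->hdr.len against the bytes remaining before dispatching on the id. A minimal sketch of that TLV-style walk with the same length validation (userspace, hypothetical record layout):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Each record is a (id, len) header plus payload; the parser checks
 * hdr.len against the bytes left before touching the payload, as the
 * trailer loop above does.
 */
struct rec_hdr {
	uint8_t id;
	uint8_t len;
};

static int process_trailer(const uint8_t *buf, size_t length)
{
	while (length >= sizeof(struct rec_hdr)) {
		struct rec_hdr hdr;

		memcpy(&hdr, buf, sizeof(hdr));
		buf += sizeof(hdr);
		length -= sizeof(hdr);

		if (hdr.len > length) {
			fprintf(stderr, "invalid record length: %u\n", hdr.len);
			return -1;	/* -EINVAL in the driver */
		}

		printf("record id %u len %u\n", hdr.id, hdr.len);
		buf += hdr.len;
		length -= hdr.len;
	}
	return 0;
}

int main(void)
{
	/* two records: id 1 len 2, id 2 len 1 */
	const uint8_t trailer[] = { 1, 2, 0xaa, 0xbb, 2, 1, 0xcc };

	return process_trailer(trailer, sizeof(trailer));
}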
+ * Since this will happen at every boot we silently ignore + * the lookahead in this case + */ + if (report->pre_valid != ((~report->post_valid) & 0xFF)) + return 0; + + if (next_lookaheads && next_lookaheads_len) { + ath10k_dbg(ar, ATH10K_DBG_HTC, + "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n", + report->pre_valid, report->post_valid); + + /* look ahead bytes are valid, copy them over */ + memcpy((u8 *)next_lookaheads, report->lookahead, 4); + + *next_lookaheads_len = 1; + } + + return 0; +} + +static int +ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc, + const struct ath10k_htc_lookahead_bundle *report, + int len, + enum ath10k_htc_ep_id eid, + void *next_lookaheads, + int *next_lookaheads_len) +{ + struct ath10k *ar = htc->ar; + int bundle_cnt = len / sizeof(*report); + + if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) { + ath10k_warn(ar, "Invalid lookahead bundle count: %d\n", + bundle_cnt); + return -EINVAL; + } + + if (next_lookaheads && next_lookaheads_len) { + int i; + + for (i = 0; i < bundle_cnt; i++) { + memcpy(((u8 *)next_lookaheads) + 4 * i, + report->lookahead, 4); + report++; + } + + *next_lookaheads_len = bundle_cnt; + } + + return 0; +} + +int ath10k_htc_process_trailer(struct ath10k_htc *htc, + u8 *buffer, + int length, + enum ath10k_htc_ep_id src_eid, + void *next_lookaheads, + int *next_lookaheads_len) +{ + struct ath10k_htc_lookahead_bundle *bundle; + struct ath10k *ar = htc->ar; int status = 0; struct ath10k_htc_record *record; u8 *orig_buffer; @@ -369,7 +373,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, if (record->hdr.len > length) { /* no room left in buffer for record */ - ath10k_warn("Invalid record length: %d\n", + ath10k_warn(ar, "Invalid record length: %d\n", record->hdr.len); status = -EINVAL; break; @@ -379,7 +383,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, case ATH10K_HTC_RECORD_CREDITS: len = sizeof(struct ath10k_htc_credit_report); if (record->hdr.len < len) { - ath10k_warn("Credit report too long\n"); + ath10k_warn(ar, "Credit report too long\n"); status = -EINVAL; break; } @@ -388,8 +392,31 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, record->hdr.len, src_eid); break; + case ATH10K_HTC_RECORD_LOOKAHEAD: + len = sizeof(struct ath10k_htc_lookahead_report); + if (record->hdr.len < len) { + ath10k_warn(ar, "Lookahead report too long\n"); + status = -EINVAL; + break; + } + status = ath10k_htc_process_lookahead(htc, + record->lookahead_report, + record->hdr.len, + src_eid, + next_lookaheads, + next_lookaheads_len); + break; + case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE: + bundle = record->lookahead_bundle; + status = ath10k_htc_process_lookahead_bundle(htc, + bundle, + record->hdr.len, + src_eid, + next_lookaheads, + next_lookaheads_len); + break; default: - ath10k_warn("Unhandled record: id:%d length:%d\n", + ath10k_warn(ar, "Unhandled record: id:%d length:%d\n", record->hdr.id, record->hdr.len); break; } @@ -403,18 +430,17 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, } if (status) - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "", + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "", orig_buffer, orig_length); return status; } +EXPORT_SYMBOL(ath10k_htc_process_trailer); -static int ath10k_htc_rx_completion_handler(struct ath10k *ar, - struct sk_buff *skb, - u8 pipe_id) +void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb) { int status = 0; - struct ath10k_htc *htc = ar->htc; + struct ath10k_htc 
*htc = &ar->htc; struct ath10k_htc_hdr *hdr; struct ath10k_htc_ep *ep; u16 payload_len; @@ -429,42 +455,34 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, eid = hdr->eid; if (eid >= ATH10K_HTC_EP_COUNT) { - ath10k_warn("HTC Rx: invalid eid %d\n", eid); - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "", + ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid); + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "", hdr, sizeof(*hdr)); - status = -EINVAL; goto out; } ep = &htc->endpoint[eid]; - - /* - * If this endpoint that received a message from the target has - * a to-target HIF pipe whose send completions are polled rather - * than interrupt-driven, this is a good point to ask HIF to check - * whether it has any completed sends to handle. - */ - if (ep->ul_is_polled) - ath10k_htc_send_complete_check(ep, 1); + if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) { + ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid); + goto out; + } payload_len = __le16_to_cpu(hdr->len); if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) { - ath10k_warn("HTC rx frame too long, len: %zu\n", + ath10k_warn(ar, "HTC rx frame too long, len: %zu\n", payload_len + sizeof(*hdr)); - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "", + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "", hdr, sizeof(*hdr)); - status = -EINVAL; goto out; } if (skb->len < payload_len) { - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC Rx: insufficient length, got %d, expected %d\n", skb->len, payload_len); - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "", hdr, sizeof(*hdr)); - status = -EINVAL; goto out; } @@ -478,9 +496,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, if ((trailer_len < min_len) || (trailer_len > payload_len)) { - ath10k_warn("Invalid trailer length: %d\n", + ath10k_warn(ar, "Invalid trailer length: %d\n", trailer_len); - status = -EPROTO; goto out; } @@ -489,7 +506,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, trailer += payload_len; trailer -= trailer_len; status = ath10k_htc_process_trailer(htc, trailer, - trailer_len, hdr->eid); + trailer_len, hdr->eid, + NULL, NULL); if (status) goto out; @@ -500,39 +518,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, /* zero length packet with trailer data, just drop these */ goto out; - if (eid == ATH10K_HTC_EP_0) { - struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data; - - switch (__le16_to_cpu(msg->hdr.message_id)) { - default: - /* handle HTC control message */ - if (completion_done(&htc->ctl_resp)) { - /* - * this is a fatal error, target should not be - * sending unsolicited messages on the ep 0 - */ - ath10k_warn("HTC rx ctrl still processing\n"); - status = -EINVAL; - complete(&htc->ctl_resp); - goto out; - } - - htc->control_resp_len = - min_t(int, skb->len, - ATH10K_HTC_MAX_CTRL_MSG_LEN); - - memcpy(htc->control_resp_buffer, skb->data, - htc->control_resp_len); - - complete(&htc->ctl_resp); - break; - case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE: - htc->htc_ops.target_send_suspend_complete(ar); - } - goto out; - } - - ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", eid, skb); ep->ep_ops.ep_rx_complete(ar, skb); @@ -540,16 +526,46 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, skb = NULL; out: kfree_skb(skb); - - return status; } 
+EXPORT_SYMBOL(ath10k_htc_rx_completion_handler); static void ath10k_htc_control_rx_complete(struct ath10k *ar, struct sk_buff *skb) { - /* This is unexpected. FW is not supposed to send regular rx on this - * endpoint. */ - ath10k_warn("unexpected htc rx\n"); + struct ath10k_htc *htc = &ar->htc; + struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data; + + switch (__le16_to_cpu(msg->hdr.message_id)) { + case ATH10K_HTC_MSG_READY_ID: + case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID: + /* handle HTC control message */ + if (completion_done(&htc->ctl_resp)) { + /* this is a fatal error, target should not be + * sending unsolicited messages on the ep 0 + */ + ath10k_warn(ar, "HTC rx ctrl still processing\n"); + complete(&htc->ctl_resp); + goto out; + } + + htc->control_resp_len = + min_t(int, skb->len, + ATH10K_HTC_MAX_CTRL_MSG_LEN); + + memcpy(htc->control_resp_buffer, skb->data, + htc->control_resp_len); + + complete(&htc->ctl_resp); + break; + case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE: + htc->htc_ops.target_send_suspend_complete(ar); + break; + default: + ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n"); + break; + } + +out: kfree_skb(skb); } @@ -580,8 +596,14 @@ static const char *htc_service_name(enum ath10k_htc_svc_id id) return "NMI Data"; case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: return "HTT Data"; + case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG: + return "HTT Data"; + case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG: + return "HTT Data"; case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS: return "RAW"; + case ATH10K_HTC_SVC_ID_HTT_LOG_MSG: + return "PKTLOG"; } return "Unknown"; @@ -598,134 +620,403 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc) ep->max_ep_message_len = 0; ep->max_tx_queue_depth = 0; ep->eid = i; - skb_queue_head_init(&ep->tx_queue); ep->htc = htc; ep->tx_credit_flow_enabled = true; - INIT_WORK(&ep->send_work, ath10k_htc_send_work); } } -static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc) +static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc, + u16 service_id) { - struct ath10k_htc_svc_tx_credits *entry; - - entry = &htc->service_tx_alloc[0]; + u8 allocation = 0; - /* - * for PCIE allocate all credists/HTC buffers to WMI. - * no buffers are used/required for data. data always - * remains on host. + /* The WMI control service is the only service with flow control. + * Let it have all transmit credits. 
*/ - entry++; - entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; - entry->credit_allocation = htc->total_transmit_credits; + if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL) + allocation = htc->total_transmit_credits; + + return allocation; } -static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc, - u16 service_id) +static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep, + struct sk_buff *bundle_skb, + struct sk_buff_head *tx_save_head) { - u8 allocation = 0; + struct ath10k_hif_sg_item sg_item; + struct ath10k_htc *htc = ep->htc; + struct ath10k *ar = htc->ar; + struct sk_buff *skb; + int ret, cn = 0; + unsigned int skb_len; + + ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len); + skb_len = bundle_skb->len; + ret = ath10k_htc_consume_credit(ep, skb_len, true); + + if (!ret) { + sg_item.transfer_id = ep->eid; + sg_item.transfer_context = bundle_skb; + sg_item.vaddr = bundle_skb->data; + sg_item.len = bundle_skb->len; + + ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1); + if (ret) + ath10k_htc_release_credit(ep, skb_len); + } + + if (ret) + dev_kfree_skb_any(bundle_skb); + + for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) { + if (ret) { + skb_pull(skb, sizeof(struct ath10k_htc_hdr)); + skb_queue_head(&ep->tx_req_head, skb); + } else { + skb_queue_tail(&ep->tx_complete_head, skb); + } + } + + if (!ret) + queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work); + + ath10k_dbg(ar, ATH10K_DBG_HTC, + "bundle tx status %d eid %d req count %d count %d len %d\n", + ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len); + return ret; +} + +static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb) +{ + struct ath10k_htc *htc = ep->htc; + struct ath10k *ar = htc->ar; + int ret; + + ret = ath10k_htc_send(htc, ep->eid, skb); + + if (ret) + skb_queue_head(&ep->tx_req_head, skb); + + ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n", + ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head)); +} + +static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep) +{ + struct ath10k_htc *htc = ep->htc; + struct sk_buff *bundle_skb, *skb; + struct sk_buff_head tx_save_head; + struct ath10k_htc_hdr *hdr; + u8 *bundle_buf; + int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0; + + if (htc->ar->state == ATH10K_STATE_WEDGED) + return -ECOMM; + + if (ep->tx_credit_flow_enabled && + ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) + return 0; + + bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size; + bundle_skb = dev_alloc_skb(bundles_left); + + if (!bundle_skb) + return -ENOMEM; + + bundle_buf = bundle_skb->data; + skb_queue_head_init(&tx_save_head); + + while (true) { + skb = skb_dequeue(&ep->tx_req_head); + if (!skb) + break; + + credit_pad = 0; + trans_len = skb->len + sizeof(*hdr); + credit_remainder = trans_len % ep->tx_credit_size; + + if (credit_remainder != 0) { + credit_pad = ep->tx_credit_size - credit_remainder; + trans_len += credit_pad; + } + + ret = ath10k_htc_consume_credit(ep, + bundle_buf + trans_len - bundle_skb->data, + false); + if (ret) { + skb_queue_head(&ep->tx_req_head, skb); + break; + } + + if (bundles_left < trans_len) { + bundle_skb->len = bundle_buf - bundle_skb->data; + ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head); + + if (ret) { + skb_queue_head(&ep->tx_req_head, skb); + return ret; + } + + if (skb_queue_len(&ep->tx_req_head) == 0) { + ath10k_htc_send_one_skb(ep, skb); + return ret; + } + + if 
(ep->tx_credit_flow_enabled && + ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) { + skb_queue_head(&ep->tx_req_head, skb); + return 0; + } + + bundles_left = + ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size; + bundle_skb = dev_alloc_skb(bundles_left); + + if (!bundle_skb) { + skb_queue_head(&ep->tx_req_head, skb); + return -ENOMEM; + } + bundle_buf = bundle_skb->data; + skb_queue_head_init(&tx_save_head); + } + + skb_push(skb, sizeof(struct ath10k_htc_hdr)); + ath10k_htc_prepare_tx_skb(ep, skb); + + memcpy(bundle_buf, skb->data, skb->len); + hdr = (struct ath10k_htc_hdr *)bundle_buf; + hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE; + hdr->pad_len = __cpu_to_le16(credit_pad); + bundle_buf += trans_len; + bundles_left -= trans_len; + skb_queue_tail(&tx_save_head, skb); + } + + if (bundle_buf != bundle_skb->data) { + bundle_skb->len = bundle_buf - bundle_skb->data; + ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head); + } else { + dev_kfree_skb_any(bundle_skb); + } + + return ret; +} + +static void ath10k_htc_bundle_tx_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work); + struct ath10k_htc_ep *ep; + struct sk_buff *skb; int i; - for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) { - if (htc->service_tx_alloc[i].service_id == service_id) - allocation = - htc->service_tx_alloc[i].credit_allocation; + for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) { + ep = &ar->htc.endpoint[i]; + + if (!ep->bundle_tx) + continue; + + ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n", + ep->eid, skb_queue_len(&ep->tx_req_head)); + + if (skb_queue_len(&ep->tx_req_head) >= + ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) { + ath10k_htc_send_bundle_skbs(ep); + } else { + skb = skb_dequeue(&ep->tx_req_head); + + if (!skb) + continue; + ath10k_htc_send_one_skb(ep, skb); + } } +} - return allocation; +static void ath10k_htc_tx_complete_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work); + struct ath10k_htc_ep *ep; + enum ath10k_htc_ep_id eid; + struct sk_buff *skb; + int i; + + for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) { + ep = &ar->htc.endpoint[i]; + eid = ep->eid; + if (ep->bundle_tx && eid == ar->htt.eid) { + ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n", + ep->eid, skb_queue_len(&ep->tx_complete_head)); + + while (true) { + skb = skb_dequeue(&ep->tx_complete_head); + if (!skb) + break; + ath10k_htc_notify_tx_completion(ep, skb); + } + } + } +} + +int ath10k_htc_send_hl(struct ath10k_htc *htc, + enum ath10k_htc_ep_id eid, + struct sk_buff *skb) +{ + struct ath10k_htc_ep *ep = &htc->endpoint[eid]; + struct ath10k *ar = htc->ar; + + if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) { + ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len); + return -ENOMEM; + } + + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n", + eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len); + + if (ep->bundle_tx) { + skb_queue_tail(&ep->tx_req_head, skb); + queue_work(ar->workqueue, &ar->bundle_tx_work); + return 0; + } else { + return ath10k_htc_send(htc, eid, skb); + } +} + +void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep) +{ + if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE && + !ep->bundle_tx) { + ep->bundle_tx = true; + skb_queue_head_init(&ep->tx_req_head); + skb_queue_head_init(&ep->tx_complete_head); + } +} + +void ath10k_htc_stop_hl(struct ath10k *ar) 
+{ + struct ath10k_htc_ep *ep; + int i; + + cancel_work_sync(&ar->bundle_tx_work); + cancel_work_sync(&ar->tx_complete_work); + + for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) { + ep = &ar->htc.endpoint[i]; + + if (!ep->bundle_tx) + continue; + + ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n", + ep->eid, skb_queue_len(&ep->tx_req_head)); + + skb_queue_purge(&ep->tx_req_head); + } } int ath10k_htc_wait_target(struct ath10k_htc *htc) { - int status = 0; - struct ath10k_htc_svc_conn_req conn_req; - struct ath10k_htc_svc_conn_resp conn_resp; + struct ath10k *ar = htc->ar; + int i, status = 0; + unsigned long time_left; struct ath10k_htc_msg *msg; u16 message_id; - u16 credit_count; - u16 credit_size; - INIT_COMPLETION(htc->ctl_resp); + time_left = wait_for_completion_timeout(&htc->ctl_resp, + ATH10K_HTC_WAIT_TIMEOUT_HZ); + if (!time_left) { + /* Workaround: In some cases the PCI HIF doesn't + * receive interrupt for the control response message + * even if the buffer was completed. It is suspected + * iomap writes unmasking PCI CE irqs aren't propagated + * properly in KVM PCI-passthrough sometimes. + */ + ath10k_warn(ar, "failed to receive control response completion, polling..\n"); - status = ath10k_hif_start(htc->ar); - if (status) { - ath10k_err("could not start HIF (%d)\n", status); - goto err_start; - } + for (i = 0; i < CE_COUNT; i++) + ath10k_hif_send_complete_check(htc->ar, i, 1); - status = wait_for_completion_timeout(&htc->ctl_resp, - ATH10K_HTC_WAIT_TIMEOUT_HZ); - if (status <= 0) { - if (status == 0) + time_left = + wait_for_completion_timeout(&htc->ctl_resp, + ATH10K_HTC_WAIT_TIMEOUT_HZ); + + if (!time_left) status = -ETIMEDOUT; + } - ath10k_err("ctl_resp never came in (%d)\n", status); - goto err_target; + if (status < 0) { + ath10k_err(ar, "ctl_resp never came in (%d)\n", status); + return status; } if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) { - ath10k_err("Invalid HTC ready msg len:%d\n", + ath10k_err(ar, "Invalid HTC ready msg len:%d\n", htc->control_resp_len); - - status = -ECOMM; - goto err_target; + return -ECOMM; } msg = (struct ath10k_htc_msg *)htc->control_resp_buffer; message_id = __le16_to_cpu(msg->hdr.message_id); - credit_count = __le16_to_cpu(msg->ready.credit_count); - credit_size = __le16_to_cpu(msg->ready.credit_size); if (message_id != ATH10K_HTC_MSG_READY_ID) { - ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id); - status = -ECOMM; - goto err_target; + ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id); + return -ECOMM; } - htc->total_transmit_credits = credit_count; - htc->target_credit_size = credit_size; + if (ar->hw_params.use_fw_tx_credits) + htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count); + else + htc->total_transmit_credits = 1; + + htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size); - ath10k_dbg(ATH10K_DBG_HTC, - "Target ready! transmit resources: %d size:%d\n", + ath10k_dbg(ar, ATH10K_DBG_HTC, + "Target ready! 
transmit resources: %d size:%d actual credits:%d\n", htc->total_transmit_credits, - htc->target_credit_size); + htc->target_credit_size, + msg->ready.credit_count); if ((htc->total_transmit_credits == 0) || (htc->target_credit_size == 0)) { - status = -ECOMM; - ath10k_err("Invalid credit size received\n"); - goto err_target; + ath10k_err(ar, "Invalid credit size received\n"); + return -ECOMM; } - ath10k_htc_setup_target_buffer_assignments(htc); - - /* setup our pseudo HTC control endpoint connection */ - memset(&conn_req, 0, sizeof(conn_req)); - memset(&conn_resp, 0, sizeof(conn_resp)); - conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete; - conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete; - conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS; - conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL; - - /* connect fake service */ - status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp); - if (status) { - ath10k_err("could not connect to htc service (%d)\n", status); - goto err_target; + /* The only way to determine if the ready message is an extended + * message is from the size. + */ + if (htc->control_resp_len >= + sizeof(msg->hdr) + sizeof(msg->ready_ext)) { + htc->alt_data_credit_size = + __le16_to_cpu(msg->ready_ext.reserved) & + ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK; + htc->max_msgs_per_htc_bundle = + min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle, + HTC_HOST_MAX_MSG_PER_RX_BUNDLE); + ath10k_dbg(ar, ATH10K_DBG_HTC, + "Extended ready message RX bundle size %d alt size %d\n", + htc->max_msgs_per_htc_bundle, + htc->alt_data_credit_size); } + INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work); + INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work); + return 0; -err_target: - ath10k_hif_stop(htc->ar); -err_start: - return status; +} + +void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc, + enum ath10k_htc_ep_id eid, + bool enable) +{ + struct ath10k *ar = htc->ar; + struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid]; + + ep->tx_credit_flow_enabled = enable; } int ath10k_htc_connect_service(struct ath10k_htc *htc, struct ath10k_htc_svc_conn_req *conn_req, struct ath10k_htc_svc_conn_resp *conn_resp) { + struct ath10k *ar = htc->ar; struct ath10k_htc_msg *msg; struct ath10k_htc_conn_svc *req_msg; struct ath10k_htc_conn_svc_response resp_msg_dummy; @@ -735,6 +1026,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, struct sk_buff *skb; unsigned int max_msg_size = 0; int length, status; + unsigned long time_left; bool disable_credit_flow_ctrl = false; u16 message_id, service_id, flags = 0; u8 tx_alloc = 0; @@ -751,12 +1043,13 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, tx_alloc = ath10k_htc_get_credit_allocation(htc, conn_req->service_id); if (!tx_alloc) - ath10k_warn("HTC Service %s does not allocate target credits\n", - htc_service_name(conn_req->service_id)); + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot htc service %s does not allocate target credits\n", + htc_service_name(conn_req->service_id)); skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); if (!skb) { - ath10k_err("Failed to allocate HTC packet\n"); + ath10k_err(ar, "Failed to allocate HTC packet\n"); return -ENOMEM; } @@ -770,17 +1063,17 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC); - req_msg = &msg->connect_service; - req_msg->flags = __cpu_to_le16(flags); - req_msg->service_id = __cpu_to_le16(conn_req->service_id); - /* Only enable credit flow control for WMI ctrl service */ if 
(conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) { flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL; disable_credit_flow_ctrl = true; } - INIT_COMPLETION(htc->ctl_resp); + req_msg = &msg->connect_service; + req_msg->flags = __cpu_to_le16(flags); + req_msg->service_id = __cpu_to_le16(conn_req->service_id); + + reinit_completion(&htc->ctl_resp); status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb); if (status) { @@ -789,13 +1082,11 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, } /* wait for response */ - status = wait_for_completion_timeout(&htc->ctl_resp, - ATH10K_HTC_CONN_SVC_TIMEOUT_HZ); - if (status <= 0) { - if (status == 0) - status = -ETIMEDOUT; - ath10k_err("Service connect timeout: %d\n", status); - return status; + time_left = wait_for_completion_timeout(&htc->ctl_resp, + ATH10K_HTC_CONN_SVC_TIMEOUT_HZ); + if (!time_left) { + ath10k_err(ar, "Service connect timeout\n"); + return -ETIMEDOUT; } /* we controlled the buffer creation, it's aligned */ @@ -807,11 +1098,11 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) || (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->connect_service_response))) { - ath10k_err("Invalid resp message ID 0x%x", message_id); + ath10k_err(ar, "Invalid resp message ID 0x%x", message_id); return -EPROTO; } - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n", htc_service_name(service_id), resp_msg->status, resp_msg->eid); @@ -820,7 +1111,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, /* check response status */ if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) { - ath10k_err("HTC Service %s connect request failed: 0x%x)\n", + ath10k_err(ar, "HTC Service %s connect request failed: 0x%x\n", htc_service_name(service_id), resp_msg->status); return -EPROTO; @@ -853,11 +1144,10 @@ setup: ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size); ep->tx_credits = tx_alloc; ep->tx_credit_size = htc->target_credit_size; - ep->tx_credits_per_max_message = ep->max_ep_message_len / - htc->target_credit_size; - if (ep->max_ep_message_len % htc->target_credit_size) - ep->tx_credits_per_max_message++; + if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG && + htc->alt_data_credit_size != 0) + ep->tx_credit_size = htc->alt_data_credit_size; /* copy all the callbacks */ ep->ep_ops = conn_req->ep_ops; @@ -865,52 +1155,98 @@ setup: status = ath10k_hif_map_service_to_pipe(htc->ar, ep->service_id, &ep->ul_pipe_id, - &ep->dl_pipe_id, - &ep->ul_is_polled, - &ep->dl_is_polled); - if (status) + &ep->dl_pipe_id); + if (status) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n", + ep->service_id); return status; + } - ath10k_dbg(ATH10K_DBG_HTC, - "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n", htc_service_name(ep->service_id), ep->ul_pipe_id, ep->dl_pipe_id, ep->eid); - ath10k_dbg(ATH10K_DBG_HTC, - "EP %d UL polled: %d, DL polled: %d\n", - ep->eid, ep->ul_is_polled, ep->dl_is_polled); - if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { ep->tx_credit_flow_enabled = false; - ath10k_dbg(ATH10K_DBG_HTC, - "HTC service: %s eid: %d TX flow control disabled\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot htc service '%s' eid %d TX flow control disabled\n", htc_service_name(ep->service_id), assigned_eid); } return status; } -struct sk_buff
*ath10k_htc_alloc_skb(int size) +struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size) { struct sk_buff *skb; skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr)); - if (!skb) { - ath10k_warn("could not allocate HTC tx skb\n"); + if (!skb) return NULL; - } skb_reserve(skb, sizeof(struct ath10k_htc_hdr)); /* FW/HTC requires 4-byte aligned streams */ if (!IS_ALIGNED((unsigned long)skb->data, 4)) - ath10k_warn("Unaligned HTC tx skb\n"); + ath10k_warn(ar, "Unaligned HTC tx skb\n"); return skb; } +static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb) +{ + trace_ath10k_htt_pktlog(ar, skb->data, skb->len); + dev_kfree_skb_any(skb); +} + +static int ath10k_htc_pktlog_connect(struct ath10k *ar) +{ + struct ath10k_htc_svc_conn_resp conn_resp; + struct ath10k_htc_svc_conn_req conn_req; + int status; + + memset(&conn_req, 0, sizeof(conn_req)); + memset(&conn_resp, 0, sizeof(conn_resp)); + + conn_req.ep_ops.ep_tx_complete = NULL; + conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx; + conn_req.ep_ops.ep_tx_credits = NULL; + + /* connect to the pktlog service */ + conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG; + status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); + if (status) { + ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n", + status); + return status; + } + + return 0; +} + +static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar) +{ + u8 ul_pipe_id; + u8 dl_pipe_id; + int status; + + status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG, + &ul_pipe_id, + &dl_pipe_id); + if (status) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n", + ATH10K_HTC_SVC_ID_HTT_LOG_MSG); + + return false; + } + + return true; +} + int ath10k_htc_start(struct ath10k_htc *htc) { + struct ath10k *ar = htc->ar; struct sk_buff *skb; int status = 0; struct ath10k_htc_msg *msg; @@ -926,7 +1262,14 @@ int ath10k_htc_start(struct ath10k_htc *htc) msg->hdr.message_id = __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID); - ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n"); + if (ar->hif.bus == ATH10K_BUS_SDIO) { + /* Extra setup params used by SDIO */ + msg->setup_complete_ext.flags = + __cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN); + msg->setup_complete_ext.max_msgs_per_bundled_recv = + htc->max_msgs_per_htc_bundle; + } + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n"); status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb); if (status) { @@ -934,67 +1277,48 @@ int ath10k_htc_start(struct ath10k_htc *htc) return status; } - return 0; -} - -/* - * stop HTC communications, i.e.
stop interrupt reception, and flush all - * queued buffers - */ -void ath10k_htc_stop(struct ath10k_htc *htc) -{ - int i; - struct ath10k_htc_ep *ep; - - spin_lock_bh(&htc->tx_lock); - htc->stopping = true; - spin_unlock_bh(&htc->tx_lock); - - for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) { - ep = &htc->endpoint[i]; - ath10k_htc_flush_endpoint_tx(htc, ep); + if (ath10k_htc_pktlog_svc_supported(ar)) { + status = ath10k_htc_pktlog_connect(ar); + if (status) { + ath10k_err(ar, "failed to connect to pktlog: %d\n", status); + return status; + } } - ath10k_hif_stop(htc->ar); - ath10k_htc_reset_endpoint_states(htc); + return 0; } /* registered target arrival callback from the HIF layer */ -struct ath10k_htc *ath10k_htc_create(struct ath10k *ar, - struct ath10k_htc_ops *htc_ops) +int ath10k_htc_init(struct ath10k *ar) { - struct ath10k_hif_cb htc_callbacks; - struct ath10k_htc_ep *ep = NULL; - struct ath10k_htc *htc = NULL; - - /* FIXME: use struct ath10k instead */ - htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL); - if (!htc) - return ERR_PTR(-ENOMEM); + int status; + struct ath10k_htc *htc = &ar->htc; + struct ath10k_htc_svc_conn_req conn_req; + struct ath10k_htc_svc_conn_resp conn_resp; spin_lock_init(&htc->tx_lock); - memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops)); - ath10k_htc_reset_endpoint_states(htc); - /* setup HIF layer callbacks */ - htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler; - htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler; htc->ar = ar; - /* Get HIF default pipe for HTC message exchange */ - ep = &htc->endpoint[ATH10K_HTC_EP_0]; + /* setup our pseudo HTC control endpoint connection */ + memset(&conn_req, 0, sizeof(conn_req)); + memset(&conn_resp, 0, sizeof(conn_resp)); + conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete; + conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete; + conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS; + conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL; - ath10k_hif_init(ar, &htc_callbacks); - ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id); + /* connect fake service */ + status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp); + if (status) { + ath10k_err(ar, "could not connect to htc service (%d)\n", + status); + return status; + } init_completion(&htc->ctl_resp); - return htc; -} - -void ath10k_htc_destroy(struct ath10k_htc *htc) -{ - kfree(htc); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index fa45844b59fb..7ff665020015 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. */ #ifndef _HTC_H_ @@ -22,8 +11,8 @@ #include <linux/list.h> #include <linux/bug.h> #include <linux/skbuff.h> -#include <linux/semaphore.h> #include <linux/timer.h> +#include <linux/bitfield.h> struct ath10k; @@ -51,16 +40,37 @@ struct ath10k; * 4-byte aligned. */ +#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 32 + enum ath10k_htc_tx_flags { ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01, ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02 }; enum ath10k_htc_rx_flags { + ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01, ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02, - ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0 }; +#define ATH10K_HTC_FLAG_BUNDLE_MASK GENMASK(7, 4) + +/* bits 2-3 are for extra bundle count bits 4-5 */ +#define ATH10K_HTC_BUNDLE_EXTRA_MASK GENMASK(3, 2) +#define ATH10K_HTC_BUNDLE_EXTRA_SHIFT 4 + +static inline unsigned int ath10k_htc_get_bundle_count(u8 max_msgs, u8 flags) +{ + unsigned int count, extra_count = 0; + + count = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, flags); + + if (max_msgs > 16) + extra_count = FIELD_GET(ATH10K_HTC_BUNDLE_EXTRA_MASK, flags) << + ATH10K_HTC_BUNDLE_EXTRA_SHIFT; + + return count + extra_count; +} + struct ath10k_htc_hdr { u8 eid; /* @enum ath10k_htc_ep_id */ u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */ @@ -73,8 +83,14 @@ struct ath10k_htc_hdr { u8 seq_no; /* for tx */ u8 control_byte1; } __packed; - u8 pad0; - u8 pad1; + union { + __le16 pad_len; + struct { + u8 pad0; + u8 pad1; + } __packed; + } __packed; + } __packed __aligned(4); enum ath10k_ath10k_htc_msg_id { @@ -103,6 +119,8 @@ enum ath10k_htc_conn_flags { #define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8 }; +#define ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK 0xFFF + enum ath10k_htc_conn_svc_status { ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0, ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1, @@ -111,6 +129,14 @@ enum ath10k_htc_conn_svc_status { ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4 }; +#define ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE 32 +#define ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE 2 +#define ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE 2 + +enum ath10k_htc_setup_complete_flags { + ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN = 1 +}; + struct ath10k_ath10k_htc_msg_hdr { __le16 message_id; /* @enum htc_message_id */ } __packed; @@ -131,8 +157,14 @@ struct ath10k_htc_ready_extended { struct ath10k_htc_ready base; u8 htc_version; /* @enum ath10k_htc_version */ u8 max_msgs_per_htc_bundle; - u8 pad0; - u8 pad1; + union { + __le16 reserved; + struct { + u8 pad0; + u8 pad1; + } __packed; + } __packed; + } __packed; struct ath10k_htc_conn_svc { @@ -175,8 +207,10 @@ struct ath10k_htc_msg { } __packed __aligned(4); enum ath10k_ath10k_htc_record_id { - ATH10K_HTC_RECORD_NULL = 0, - ATH10K_HTC_RECORD_CREDITS = 1 + ATH10K_HTC_RECORD_NULL = 0, + ATH10K_HTC_RECORD_CREDITS = 1, + ATH10K_HTC_RECORD_LOOKAHEAD = 2, + ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE = 3, }; struct ath10k_ath10k_htc_record_hdr { @@ -193,28 +227,31 @@ struct ath10k_htc_credit_report { u8 pad1; } __packed; +struct ath10k_htc_lookahead_report { + u8 pre_valid; + u8 pad0; + u8 pad1; + u8 pad2; + u8 lookahead[4]; + u8 post_valid; + u8 pad3; + u8 pad4; + u8 pad5; +} __packed; + +struct ath10k_htc_lookahead_bundle { + u8 lookahead[4]; +} 
__packed; + struct ath10k_htc_record { struct ath10k_ath10k_htc_record_hdr hdr; union { - struct ath10k_htc_credit_report credit_report[0]; - u8 pauload[0]; + DECLARE_FLEX_ARRAY(struct ath10k_htc_credit_report, credit_report); + DECLARE_FLEX_ARRAY(struct ath10k_htc_lookahead_report, lookahead_report); + DECLARE_FLEX_ARRAY(struct ath10k_htc_lookahead_bundle, lookahead_bundle); }; } __packed __aligned(4); -/* - * note: the trailer offset is dynamic depending - * on payload length. this is only a struct layout draft - */ -struct ath10k_htc_frame { - struct ath10k_htc_hdr hdr; - union { - struct ath10k_htc_msg msg; - u8 payload[0]; - }; - struct ath10k_htc_record trailer[0]; -} __packed __aligned(4); - - /*******************/ /* Host-side stuff */ /*******************/ @@ -224,6 +261,7 @@ enum ath10k_htc_svc_gid { ATH10K_HTC_SVC_GRP_WMI = 1, ATH10K_HTC_SVC_GRP_NMI = 2, ATH10K_HTC_SVC_GRP_HTT = 3, + ATH10K_LOG_SERVICE_GROUP = 6, ATH10K_HTC_SVC_GRP_TEST = 254, ATH10K_HTC_SVC_GRP_LAST = 255, @@ -249,6 +287,9 @@ enum ath10k_htc_svc_id { ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0), + ATH10K_HTC_SVC_ID_HTT_DATA2_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 1), + ATH10K_HTC_SVC_ID_HTT_DATA3_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 2), + ATH10K_HTC_SVC_ID_HTT_LOG_MSG = SVC(ATH10K_LOG_SERVICE_GROUP, 0), /* raw stream service (i.e. flash, tcmd, calibration apps) */ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0), }; @@ -276,6 +317,7 @@ struct ath10k_htc_ops { struct ath10k_htc_ep_ops { void (*ep_tx_complete)(struct ath10k *, struct sk_buff *); void (*ep_rx_complete)(struct ath10k *, struct sk_buff *); + void (*ep_tx_credits)(struct ath10k *); }; /* service connection information */ @@ -297,10 +339,10 @@ struct ath10k_htc_svc_conn_resp { #define ATH10K_NUM_CONTROL_TX_BUFFERS 2 #define ATH10K_HTC_MAX_LEN 4096 #define ATH10K_HTC_MAX_CTRL_MSG_LEN 256 -#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ) +#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1 * HZ) #define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \ sizeof(struct ath10k_htc_hdr)) -#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ) +#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ) struct ath10k_htc_ep { struct ath10k_htc *htc; @@ -312,18 +354,15 @@ struct ath10k_htc_ep { int max_ep_message_len; u8 ul_pipe_id; u8 dl_pipe_id; - int ul_is_polled; /* call HIF to get tx completions */ - int dl_is_polled; /* call HIF to fetch rx (not implemented) */ - - struct sk_buff_head tx_queue; u8 seq_no; /* for debugging */ int tx_credits; int tx_credit_size; - int tx_credits_per_max_message; bool tx_credit_flow_enabled; + bool bundle_tx; + struct sk_buff_head tx_req_head; + struct sk_buff_head tx_complete_head; - struct work_struct send_work; }; struct ath10k_htc_svc_tx_credits { @@ -335,7 +374,7 @@ struct ath10k_htc { struct ath10k *ar; struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT]; - /* protects endpoint and stopping fields */ + /* protects endpoints */ spinlock_t tx_lock; struct ath10k_htc_ops htc_ops; @@ -346,23 +385,37 @@ struct ath10k_htc { struct completion ctl_resp; int total_transmit_credits; - struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT]; int target_credit_size; - - bool stopping; + u8 max_msgs_per_htc_bundle; + int alt_data_credit_size; }; -struct ath10k_htc *ath10k_htc_create(struct ath10k *ar, - struct ath10k_htc_ops *htc_ops); +int ath10k_htc_init(struct ath10k *ar); int ath10k_htc_wait_target(struct ath10k_htc *htc); +void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep); int ath10k_htc_start(struct 
ath10k_htc *htc); int ath10k_htc_connect_service(struct ath10k_htc *htc, struct ath10k_htc_svc_conn_req *conn_req, struct ath10k_htc_svc_conn_resp *conn_resp); +void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc, + enum ath10k_htc_ep_id eid, + bool enable); int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, struct sk_buff *packet); -void ath10k_htc_stop(struct ath10k_htc *htc); -void ath10k_htc_destroy(struct ath10k_htc *htc); -struct sk_buff *ath10k_htc_alloc_skb(int size); +void ath10k_htc_stop_hl(struct ath10k *ar); + +int ath10k_htc_send_hl(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, + struct sk_buff *packet); +struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size); +void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb); +void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb); +void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, + struct sk_buff *skb); +int ath10k_htc_process_trailer(struct ath10k_htc *htc, + u8 *buffer, + int length, + enum ath10k_htc_ep_id src_eid, + void *next_lookaheads, + int *next_lookaheads_len); #endif diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 185a5468a2f2..dbaf262cd7c1 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -1,42 +1,309 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include <linux/slab.h> +#include <linux/if_ether.h> #include "htt.h" #include "core.h" #include "debug.h" +#include "hif.h" -static int ath10k_htt_htc_attach(struct ath10k_htt *htt) +static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = { + [HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF, + [HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND, + [HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH, + [HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP, + [HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP, + [HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA, + [HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA, + [HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND, + [HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG, + [HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF, + [HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND, + [HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND, + [HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] = + HTT_T2H_MSG_TYPE_TX_INSPECT_IND, + [HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] = + HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION, + [HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] = + HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND, + [HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND, + [HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] = + HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND, + [HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST, +}; + +static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = { + [HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF, + [HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND, + [HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH, + [HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP, + [HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP, + [HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA, + [HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA, + [HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND, + [HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG, + [HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF, + [HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND, + [HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND, + [HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND, + [HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND, + [HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST, + [HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE, + [HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF, + [HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, + [HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] = + HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION, +}; + +static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = { + [HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF, + [HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND, + [HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH, + [HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP, + [HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP, + [HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA, + [HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA, + [HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND, + [HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG, + [HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = 
HTT_T2H_MSG_TYPE_STATS_CONF, + [HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND, + [HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND, + [HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND, + [HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND, + [HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] = + HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION, + [HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] = + HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND, + [HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND, + [HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] = + HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND, + [HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] = + HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND, + [HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] = + HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE, + [HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE, + [HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] = + HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR, + [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST, +}; + +static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = { + [HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF, + [HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND, + [HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH, + [HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP, + [HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP, + [HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA, + [HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA, + [HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND, + [HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG, + [HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF, + [HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND, + [HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND, + [HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND, + [HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] = + HTT_T2H_MSG_TYPE_TX_INSPECT_IND, + [HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] = + HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION, + [HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE, + [HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] = + HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND, + [HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND, + [HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] = + HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND, + [HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST, + [HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS, + [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF, + [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] = + HTT_T2H_MSG_TYPE_TX_FETCH_IND, + [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM] = + HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM, + [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] = + HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, + [HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] = + HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND, + [HTT_10_4_T2H_MSG_TYPE_PEER_STATS] = + HTT_T2H_MSG_TYPE_PEER_STATS, +}; + +const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops = { + .rx_desc_size = sizeof(struct htt_rx_desc_v1), + .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload) +}; + +static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc = container_of(rxd, + struct htt_rx_desc_v1, + base); + + return MS(__le32_to_cpu(rx_desc->msdu_end.qca99x0.info1), + RX_MSDU_END_INFO1_L3_HDR_PAD); +} + +static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd) 
+{ + struct htt_rx_desc_v1 *rx_desc = container_of(rxd, + struct htt_rx_desc_v1, + base); + + return !!(rx_desc->msdu_end.common.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR)); +} + +const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops = { + .rx_desc_size = sizeof(struct htt_rx_desc_v1), + .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload), + + .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes, + .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error, +}; + +static void ath10k_rx_desc_wcn3990_get_offsets(struct htt_rx_ring_rx_desc_offsets *off) +{ +#define desc_offset(x) (offsetof(struct htt_rx_desc_v2, x) / 4) + off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); + off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); + off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); + off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); + off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); + off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); + off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); + off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); + off->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); + off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); +#undef desc_offset +} + +static struct htt_rx_desc * +ath10k_rx_desc_wcn3990_from_raw_buffer(void *buff) +{ + return &((struct htt_rx_desc_v2 *)buff)->base; +} + +static struct rx_attention * +ath10k_rx_desc_wcn3990_get_attention(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base); + + return &rx_desc->attention; +} + +static struct rx_frag_info_common * +ath10k_rx_desc_wcn3990_get_frag_info(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base); + + return &rx_desc->frag_info.common; +} + +static struct rx_mpdu_start * +ath10k_rx_desc_wcn3990_get_mpdu_start(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base); + + return &rx_desc->mpdu_start; +} + +static struct rx_mpdu_end * +ath10k_rx_desc_wcn3990_get_mpdu_end(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base); + + return &rx_desc->mpdu_end; +} + +static struct rx_msdu_start_common * +ath10k_rx_desc_wcn3990_get_msdu_start(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base); + + return &rx_desc->msdu_start.common; +} + +static struct rx_msdu_end_common * +ath10k_rx_desc_wcn3990_get_msdu_end(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base); + + return &rx_desc->msdu_end.common; +} + +static struct rx_ppdu_start * +ath10k_rx_desc_wcn3990_get_ppdu_start(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base); + + return &rx_desc->ppdu_start; +} + +static struct rx_ppdu_end_common * +ath10k_rx_desc_wcn3990_get_ppdu_end(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base); + + return &rx_desc->ppdu_end.common; +} + +static u8 * +ath10k_rx_desc_wcn3990_get_rx_hdr_status(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base); + + return rx_desc->rx_hdr_status; +} + +static u8 * 
+ath10k_rx_desc_wcn3990_get_msdu_payload(struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base); + + return rx_desc->msdu_payload; +} + +const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops = { + .rx_desc_size = sizeof(struct htt_rx_desc_v2), + .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v2, msdu_payload), + + .rx_desc_from_raw_buffer = ath10k_rx_desc_wcn3990_from_raw_buffer, + .rx_desc_get_offsets = ath10k_rx_desc_wcn3990_get_offsets, + .rx_desc_get_attention = ath10k_rx_desc_wcn3990_get_attention, + .rx_desc_get_frag_info = ath10k_rx_desc_wcn3990_get_frag_info, + .rx_desc_get_mpdu_start = ath10k_rx_desc_wcn3990_get_mpdu_start, + .rx_desc_get_mpdu_end = ath10k_rx_desc_wcn3990_get_mpdu_end, + .rx_desc_get_msdu_start = ath10k_rx_desc_wcn3990_get_msdu_start, + .rx_desc_get_msdu_end = ath10k_rx_desc_wcn3990_get_msdu_end, + .rx_desc_get_ppdu_start = ath10k_rx_desc_wcn3990_get_ppdu_start, + .rx_desc_get_ppdu_end = ath10k_rx_desc_wcn3990_get_ppdu_end, + .rx_desc_get_rx_hdr_status = ath10k_rx_desc_wcn3990_get_rx_hdr_status, + .rx_desc_get_msdu_payload = ath10k_rx_desc_wcn3990_get_msdu_payload, +}; + +int ath10k_htt_connect(struct ath10k_htt *htt) { struct ath10k_htc_svc_conn_req conn_req; struct ath10k_htc_svc_conn_resp conn_resp; + struct ath10k *ar = htt->ar; + struct ath10k_htc_ep *ep; int status; memset(&conn_req, 0, sizeof(conn_req)); memset(&conn_resp, 0, sizeof(conn_resp)); conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete; - conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler; + conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler; + conn_req.ep_ops.ep_tx_credits = ath10k_htt_op_ep_tx_credits; /* connect to control service */ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG; - status = ath10k_htc_connect_service(htt->ar->htc, &conn_req, + status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req, &conn_resp); if (status) @@ -44,38 +311,23 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt) htt->eid = conn_resp.eid; + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) { + ep = &ar->htc.endpoint[htt->eid]; + ath10k_htc_setup_tx_req(ep); + } + + htt->disable_tx_comp = ath10k_hif_get_htt_tx_complete(htt->ar); + if (htt->disable_tx_comp) + ath10k_htc_change_tx_credit_flow(&htt->ar->htc, htt->eid, true); + return 0; } -struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar) +int ath10k_htt_init(struct ath10k *ar) { - struct ath10k_htt *htt; - int ret; - - htt = kzalloc(sizeof(*htt), GFP_KERNEL); - if (!htt) - return NULL; + struct ath10k_htt *htt = &ar->htt; htt->ar = ar; - htt->max_throughput_mbps = 800; - - /* - * Connect to HTC service. - * This has to be done before calling ath10k_htt_rx_attach, - * since ath10k_htt_rx_attach involves sending a rx ring configure - * message to the target. 
- */ - if (ath10k_htt_htc_attach(htt)) - goto err_htc_attach; - - ret = ath10k_htt_tx_attach(htt); - if (ret) { - ath10k_err("could not attach htt tx (%d)\n", ret); - goto err_htc_attach; - } - - if (ath10k_htt_rx_attach(htt)) - goto err_rx_attach; /* * Prefetch enough data to satisfy target @@ -89,39 +341,56 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar) 8 + /* llc snap */ 2; /* ip4 dscp or ip6 priority */ - return htt; + switch (ar->running_fw->fw_file.htt_op_version) { + case ATH10K_FW_HTT_OP_VERSION_10_4: + ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types; + ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS; + break; + case ATH10K_FW_HTT_OP_VERSION_10_1: + ar->htt.t2h_msg_types = htt_10x_t2h_msg_types; + ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS; + break; + case ATH10K_FW_HTT_OP_VERSION_TLV: + ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types; + ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS; + break; + case ATH10K_FW_HTT_OP_VERSION_MAIN: + ar->htt.t2h_msg_types = htt_main_t2h_msg_types; + ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS; + break; + case ATH10K_FW_HTT_OP_VERSION_MAX: + case ATH10K_FW_HTT_OP_VERSION_UNSET: + WARN_ON(1); + return -EINVAL; + } + ath10k_htt_set_tx_ops(htt); + ath10k_htt_set_rx_ops(htt); -err_rx_attach: - ath10k_htt_tx_detach(htt); -err_htc_attach: - kfree(htt); - return NULL; + return 0; } -#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ) +#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ) static int ath10k_htt_verify_version(struct ath10k_htt *htt) { - ath10k_dbg(ATH10K_DBG_HTT, - "htt target version %d.%d; host version %d.%d\n", - htt->target_version_major, - htt->target_version_minor, - HTT_CURRENT_VERSION_MAJOR, - HTT_CURRENT_VERSION_MINOR); + struct ath10k *ar = htt->ar; - if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) { - ath10k_err("htt major versions are incompatible!\n"); - return -ENOTSUPP; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n", + htt->target_version_major, htt->target_version_minor); - if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR) - ath10k_warn("htt minor version differ but still compatible\n"); + if (htt->target_version_major != 2 && + htt->target_version_major != 3) { + ath10k_err(ar, "unsupported htt major version %d. 
supported versions are 2 and 3\n", + htt->target_version_major); + return -EOPNOTSUPP; + } return 0; } -int ath10k_htt_attach_target(struct ath10k_htt *htt) +int ath10k_htt_setup(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; int status; init_completion(&htt->target_version_received); @@ -131,22 +400,38 @@ int ath10k_htt_attach_target(struct ath10k_htt *htt) return status; status = wait_for_completion_timeout(&htt->target_version_received, - HTT_TARGET_VERSION_TIMEOUT_HZ); - if (status <= 0) { - ath10k_warn("htt version request timed out\n"); + HTT_TARGET_VERSION_TIMEOUT_HZ); + if (status == 0) { + ath10k_warn(ar, "htt version request timed out\n"); return -ETIMEDOUT; } status = ath10k_htt_verify_version(htt); + if (status) { + ath10k_warn(ar, "failed to verify htt version: %d\n", + status); + return status; + } + + status = ath10k_htt_send_frag_desc_bank_cfg(htt); if (status) return status; - return ath10k_htt_send_rx_ring_cfg_ll(htt); -} + status = ath10k_htt_send_rx_ring_cfg(htt); + if (status) { + ath10k_warn(ar, "failed to setup rx ring: %d\n", + status); + return status; + } -void ath10k_htt_detach(struct ath10k_htt *htt) -{ - ath10k_htt_rx_detach(htt); - ath10k_htt_tx_detach(htt); - kfree(htt); + status = ath10k_htt_h2t_aggr_cfg_msg(htt, + htt->max_num_ampdu, + htt->max_num_amsdu); + if (status) { + ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n", + status); + return status; + } + + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index a7a7aa040536..603f6de62b0a 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1,32 +1,25 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _HTT_H_ #define _HTT_H_ #include <linux/bug.h> +#include <linux/interrupt.h> +#include <linux/dmapool.h> +#include <linux/hashtable.h> +#include <linux/kfifo.h> +#include <net/mac80211.h> -#include "core.h" #include "htc.h" +#include "hw.h" #include "rx_desc.h" -#define HTT_CURRENT_VERSION_MAJOR 2 -#define HTT_CURRENT_VERSION_MINOR 1 - enum htt_dbg_stats_type { HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0, HTT_DBG_STATS_RX_REORDER = 1 << 1, @@ -46,7 +39,12 @@ enum htt_h2t_msg_type { /* host-to-target */ HTT_H2T_MSG_TYPE_SYNC = 4, HTT_H2T_MSG_TYPE_AGGR_CFG = 5, HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6, + + /* This command is used for sending management frames in HTT < 3.0. + * HTT >= 3.0 uses TX_FRM for everything. 
+ */ HTT_H2T_MSG_TYPE_MGMT_TX = 7, + HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11, HTT_H2T_NUM_MSGS /* keep this last */ }; @@ -72,16 +70,66 @@ struct htt_ver_req { * The HTT tx descriptor is defined in two manners: by a struct with * bitfields, and by a series of [dword offset, bit mask, bit shift] * definitions. - * The target should use the struct def, for simplicitly and clarity, + * The target should use the struct def, for simplicity and clarity, * but the host shall use the bit-mast + bit-shift defs, to be endian- * neutral. Specifically, the host shall use the get/set macros built * around the mask + shift defs. */ struct htt_data_tx_desc_frag { - __le32 paddr; - __le32 len; + union { + struct double_word_addr { + __le32 paddr; + __le32 len; + } __packed dword_addr; + struct triple_word_addr { + __le32 paddr_lo; + __le16 paddr_hi; + __le16 len_16; + } __packed tword_addr; + } __packed; } __packed; +struct htt_msdu_ext_desc { + __le32 tso_flag[3]; + __le16 ip_identification; + u8 flags; + u8 reserved; + struct htt_data_tx_desc_frag frags[6]; +}; + +struct htt_msdu_ext_desc_64 { + __le32 tso_flag[5]; + __le16 ip_identification; + u8 flags; + u8 reserved; + struct htt_data_tx_desc_frag frags[6]; +}; + +#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4) + +#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE) + +#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 BIT(16) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 BIT(17) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 BIT(18) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 BIT(19) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64 BIT(20) +#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64 BIT(21) + +#define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64) + enum htt_data_tx_desc_flags0 { HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0, HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1, @@ -103,9 +151,19 @@ enum htt_data_tx_desc_flags1 { HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12, HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13, HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14, - HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15 + HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE = 1 << 15 }; +#define HTT_TX_CREDIT_DELTA_ABS_M 0xffff0000 +#define HTT_TX_CREDIT_DELTA_ABS_S 16 +#define HTT_TX_CREDIT_DELTA_ABS_GET(word) \ + (((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S) + +#define HTT_TX_CREDIT_SIGN_BIT_M 0x00000100 +#define HTT_TX_CREDIT_SIGN_BIT_S 8 +#define HTT_TX_CREDIT_SIGN_BIT_GET(word) \ + (((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S) + enum htt_data_tx_ext_tid { HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16, HTT_DATA_TX_EXT_TID_MGMT = 17, @@ -124,6 +182,7 @@ enum htt_data_tx_ext_tid { * (HL hosts manage queues on the host ) * more_in_batch: only for HL hosts. indicates if more packets are * pending. 
this allows target to wait and aggregate + * freq: 0 means home channel of given vdev. intended for offchannel */ struct htt_data_tx_desc { u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */ @@ -131,7 +190,29 @@ struct htt_data_tx_desc { __le16 len; __le16 id; __le32 frags_paddr; - __le32 peerid; + union { + __le32 peerid; + struct { + __le16 peerid; + __le16 freq; + } __packed offchan_tx; + } __packed; + u8 prefetch[0]; /* start of frame, for FW classification engine */ +} __packed; + +struct htt_data_tx_desc_64 { + u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */ + __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */ + __le16 len; + __le16 id; + __le64 frags_paddr; + union { + __le32 peerid; + struct { + __le16 peerid; + __le16 freq; + } __packed offchan_tx; + } __packed; u8 prefetch[0]; /* start of frame, for FW classification engine */ } __packed; @@ -154,14 +235,13 @@ enum htt_rx_ring_flags { HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15 }; -struct htt_rx_ring_setup_ring { - __le32 fw_idx_shadow_reg_paddr; - __le32 rx_ring_base_paddr; - __le16 rx_ring_len; /* in 4-byte words */ - __le16 rx_ring_bufsize; /* rx skb size - in bytes */ - __le16 flags; /* %HTT_RX_RING_FLAGS_ */ - __le16 fw_idx_init_val; +#define HTT_RX_RING_SIZE_MIN 128 +#define HTT_RX_RING_SIZE_MAX 2048 +#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX +#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1) +#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1) +struct htt_rx_ring_rx_desc_offsets { /* the following offsets are in 4-byte units */ __le16 mac80211_hdr_offset; __le16 msdu_payload_offset; @@ -175,14 +255,41 @@ struct htt_rx_ring_setup_ring { __le16 frag_info_offset; } __packed; +struct htt_rx_ring_setup_ring32 { + __le32 fw_idx_shadow_reg_paddr; + __le32 rx_ring_base_paddr; + __le16 rx_ring_len; /* in 4-byte words */ + __le16 rx_ring_bufsize; /* rx skb size - in bytes */ + __le16 flags; /* %HTT_RX_RING_FLAGS_ */ + __le16 fw_idx_init_val; + + struct htt_rx_ring_rx_desc_offsets offsets; +} __packed; + +struct htt_rx_ring_setup_ring64 { + __le64 fw_idx_shadow_reg_paddr; + __le64 rx_ring_base_paddr; + __le16 rx_ring_len; /* in 4-byte words */ + __le16 rx_ring_bufsize; /* rx skb size - in bytes */ + __le16 flags; /* %HTT_RX_RING_FLAGS_ */ + __le16 fw_idx_init_val; + + struct htt_rx_ring_rx_desc_offsets offsets; +} __packed; + struct htt_rx_ring_setup_hdr { u8 num_rings; /* supported values: 1, 2 */ __le16 rsvd0; } __packed; -struct htt_rx_ring_setup { +struct htt_rx_ring_setup_32 { + struct htt_rx_ring_setup_hdr hdr; + struct htt_rx_ring_setup_ring32 rings[]; +} __packed; + +struct htt_rx_ring_setup_64 { struct htt_rx_ring_setup_hdr hdr; - struct htt_rx_ring_setup_ring rings[0]; + struct htt_rx_ring_setup_ring64 rings[]; } __packed; /* @@ -213,6 +320,7 @@ struct htt_stats_req { } __packed; #define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff +#define HTT_STATS_BIT_MASK GENMASK(16, 0) /* * htt_oob_sync_req - request out-of-band sync @@ -238,19 +346,23 @@ struct htt_oob_sync_req { __le16 rsvd0; } __packed; -#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_MASK 0x1F -#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_LSB 0 - struct htt_aggr_conf { u8 max_num_ampdu_subframes; - union { - /* dont use bitfields; undefined behaviour */ - u8 flags; /* see %HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_ */ - u8 max_num_amsdu_subframes:5; - } __packed; + /* amsdu_subframes is limited by 0x1F mask */ + u8 max_num_amsdu_subframes; +} __packed; + +struct htt_aggr_conf_v2 { + u8 max_num_ampdu_subframes; + /* amsdu_subframes is limited by 0x1F mask */ + u8 
max_num_amsdu_subframes; + u8 reserved; } __packed; #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32 +struct htt_mgmt_tx_desc_qca99x0 { + __le32 rate; +} __packed; struct htt_mgmt_tx_desc { u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)]; @@ -259,6 +371,9 @@ struct htt_mgmt_tx_desc { __le32 len; __le32 vdev_id; u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN]; + union { + struct htt_mgmt_tx_desc_qca99x0 qca99x0; + } __packed; } __packed; enum htt_mgmt_tx_status { @@ -269,24 +384,146 @@ enum htt_mgmt_tx_status { /*=== target -> host messages ===============================================*/ +enum htt_main_t2h_msg_type { + HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0, + HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1, + HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2, + HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3, + HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4, + HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5, + HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6, + HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, + HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8, + HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9, + HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, + HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb, + HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, + HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe, + HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf, + HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10, + HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11, + HTT_MAIN_T2H_MSG_TYPE_TEST, + /* keep this last */ + HTT_MAIN_T2H_NUM_MSGS +}; + +enum htt_10x_t2h_msg_type { + HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0, + HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1, + HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2, + HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3, + HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4, + HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5, + HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6, + HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, + HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8, + HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9, + HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, + HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb, + HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, + HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, + HTT_10X_T2H_MSG_TYPE_TEST = 0xe, + HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf, + HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11, + HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12, + HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13, + /* keep this last */ + HTT_10X_T2H_NUM_MSGS +}; + +enum htt_tlv_t2h_msg_type { + HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0, + HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1, + HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2, + HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3, + HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4, + HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5, + HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6, + HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, + HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8, + HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9, + HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, + HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb, + HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */ + HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, + HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe, + HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf, + HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10, + HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11, + HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12, + /* 0x13 reservd */ + HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14, + HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15, + HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16, + HTT_TLV_T2H_MSG_TYPE_TEST, + /* keep this last */ + HTT_TLV_T2H_NUM_MSGS +}; + +enum htt_10_4_t2h_msg_type { + HTT_10_4_T2H_MSG_TYPE_VERSION_CONF = 0x0, + HTT_10_4_T2H_MSG_TYPE_RX_IND = 0x1, + HTT_10_4_T2H_MSG_TYPE_RX_FLUSH = 0x2, + 
HTT_10_4_T2H_MSG_TYPE_PEER_MAP = 0x3, + HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP = 0x4, + HTT_10_4_T2H_MSG_TYPE_RX_ADDBA = 0x5, + HTT_10_4_T2H_MSG_TYPE_RX_DELBA = 0x6, + HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, + HTT_10_4_T2H_MSG_TYPE_PKTLOG = 0x8, + HTT_10_4_T2H_MSG_TYPE_STATS_CONF = 0x9, + HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, + HTT_10_4_T2H_MSG_TYPE_SEC_IND = 0xb, + HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, + HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, + HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe, + HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE = 0xf, + HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0x10, + HTT_10_4_T2H_MSG_TYPE_RX_PN_IND = 0x11, + HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12, + HTT_10_4_T2H_MSG_TYPE_TEST = 0x13, + HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14, + HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15, + HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16, + HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM = 0x17, + HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18, + /* 0x19 to 0x2f are reserved */ + HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30, + HTT_10_4_T2H_MSG_TYPE_PEER_STATS = 0x31, + /* keep this last */ + HTT_10_4_T2H_NUM_MSGS +}; enum htt_t2h_msg_type { - HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0, - HTT_T2H_MSG_TYPE_RX_IND = 0x1, - HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2, - HTT_T2H_MSG_TYPE_PEER_MAP = 0x3, - HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4, - HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5, - HTT_T2H_MSG_TYPE_RX_DELBA = 0x6, - HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, - HTT_T2H_MSG_TYPE_PKTLOG = 0x8, - HTT_T2H_MSG_TYPE_STATS_CONF = 0x9, - HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, - HTT_T2H_MSG_TYPE_SEC_IND = 0xb, - HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, - HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, - HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe, + HTT_T2H_MSG_TYPE_VERSION_CONF, + HTT_T2H_MSG_TYPE_RX_IND, + HTT_T2H_MSG_TYPE_RX_FLUSH, + HTT_T2H_MSG_TYPE_PEER_MAP, + HTT_T2H_MSG_TYPE_PEER_UNMAP, + HTT_T2H_MSG_TYPE_RX_ADDBA, + HTT_T2H_MSG_TYPE_RX_DELBA, + HTT_T2H_MSG_TYPE_TX_COMPL_IND, + HTT_T2H_MSG_TYPE_PKTLOG, + HTT_T2H_MSG_TYPE_STATS_CONF, + HTT_T2H_MSG_TYPE_RX_FRAG_IND, + HTT_T2H_MSG_TYPE_SEC_IND, + HTT_T2H_MSG_TYPE_RC_UPDATE_IND, + HTT_T2H_MSG_TYPE_TX_INSPECT_IND, + HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION, + HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND, + HTT_T2H_MSG_TYPE_RX_PN_IND, + HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND, + HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND, + HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE, + HTT_T2H_MSG_TYPE_CHAN_CHANGE, + HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR, + HTT_T2H_MSG_TYPE_AGGR_CONF, + HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, HTT_T2H_MSG_TYPE_TEST, + HTT_T2H_MSG_TYPE_EN_STATS, + HTT_T2H_MSG_TYPE_TX_FETCH_IND, + HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM, + HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND, + HTT_T2H_MSG_TYPE_PEER_STATS, /* keep this last */ HTT_T2H_NUM_MSGS }; @@ -311,18 +548,25 @@ struct htt_ver_resp { u8 rsvd0; } __packed; +#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0) + +#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0) + struct htt_mgmt_tx_completion { u8 rsvd0; u8 rsvd1; - u8 rsvd2; + u8 flags; __le32 desc_id; __le32 status; + __le32 ppdu_id; + __le32 info; } __packed; -#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F) +#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F) #define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0) -#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6) -#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7) +#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5) +#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6) +#define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7) #define 
HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0 @@ -335,6 +579,15 @@ struct htt_mgmt_tx_completion { #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000 #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24 +#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0) +#define HTT_TX_CMPL_FLAG_PPID_PRESENT BIT(1) +#define HTT_TX_CMPL_FLAG_PA_PRESENT BIT(2) +#define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT BIT(3) + +#define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3) +#define HTT_TX_DATA_APPEND_RETRIES BIT(0) +#define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1) + struct htt_rx_indication_hdr { u8 info0; /* %HTT_RX_INDICATION_INFO0_ */ __le16 peer_id; @@ -429,7 +682,7 @@ enum htt_rx_mpdu_status { /* only accept EAPOL frames */ HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER, HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC, - /* Non-data in promiscous mode */ + /* Non-data in promiscuous mode */ HTT_RX_IND_MPDU_STATUS_MGMT_CTRL, HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR, HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR, @@ -455,7 +708,7 @@ struct htt_rx_indication_prefix { __le16 fw_rx_desc_bytes; u8 pad0; u8 pad1; -}; +} __packed; struct htt_rx_indication { struct htt_rx_indication_hdr hdr; @@ -474,7 +727,30 @@ struct htt_rx_indication { * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4) * and has %num_mpdu_ranges elements. */ - struct htt_rx_indication_mpdu_range mpdu_ranges[0]; + struct htt_rx_indication_mpdu_range mpdu_ranges[]; +} __packed; + +/* High latency version of the RX indication */ +struct htt_rx_indication_hl { + struct htt_rx_indication_hdr hdr; + struct htt_rx_indication_ppdu ppdu; + struct htt_rx_indication_prefix prefix; + struct fw_rx_desc_hl fw_desc; + struct htt_rx_indication_mpdu_range mpdu_ranges[]; +} __packed; + +struct htt_hl_rx_desc { + __le32 info; + __le32 pn_31_0; + union { + struct { + __le16 pn_47_32; + __le16 pn_63_48; + } pn16; + __le32 pn_63_32; + } u0; + __le32 pn_95_64; + __le32 pn_127_96; } __packed; static inline struct htt_rx_indication_mpdu_range * @@ -489,6 +765,18 @@ static inline struct htt_rx_indication_mpdu_range * return ptr; } +static inline struct htt_rx_indication_mpdu_range * + htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind) +{ + void *ptr = rx_ind; + + ptr += sizeof(rx_ind->hdr) + + sizeof(rx_ind->ppdu) + + sizeof(rx_ind->prefix) + + sizeof(rx_ind->fw_desc); + return ptr; +} + enum htt_rx_flush_mpdu_status { HTT_RX_FLUSH_MPDU_DISCARD = 0, HTT_RX_FLUSH_MPDU_REORDER = 1, @@ -522,6 +810,21 @@ struct htt_rx_peer_unmap { __le16 peer_id; } __packed; +enum htt_txrx_sec_cast_type { + HTT_TXRX_SEC_MCAST = 0, + HTT_TXRX_SEC_UCAST +}; + +enum htt_rx_pn_check_type { + HTT_RX_NON_PN_CHECK = 0, + HTT_RX_PN_CHECK +}; + +enum htt_rx_tkip_demic_type { + HTT_RX_NON_TKIP_MIC = 0, + HTT_RX_TKIP_MIC +}; + enum htt_security_types { HTT_SECURITY_NONE, HTT_SECURITY_WEP128, @@ -535,6 +838,10 @@ enum htt_security_types { HTT_NUM_SECURITY_TYPES /* keep this last! 
*/ }; +#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2 +#define ATH10K_TXRX_NUM_EXT_TIDS 19 +#define ATH10K_TXRX_NON_QOS_TID 16 + enum htt_security_flags { #define HTT_SECURITY_TYPE_MASK 0x7F #define HTT_SECURITY_TYPE_LSB 0 @@ -574,8 +881,7 @@ enum htt_data_tx_status { HTT_DATA_TX_STATUS_OK = 0, HTT_DATA_TX_STATUS_DISCARD = 1, HTT_DATA_TX_STATUS_NO_ACK = 2, - HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */ - HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128 + HTT_DATA_TX_STATUS_POSTPONE = 3 /* HL only */ }; enum htt_data_tx_flags { @@ -588,6 +894,88 @@ enum htt_data_tx_flags { #define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF +struct htt_append_retries { + __le16 msdu_id; + u8 tx_retries; + u8 flag; +} __packed; + +struct htt_data_tx_completion_ext { + struct htt_append_retries a_retries; + __le32 t_stamp; + __le16 msdus_rssi[]; +} __packed; + +/* + * @brief target -> host TX completion indication message definition + * + * @details + * The following diagram shows the format of the TX completion indication sent + * from the target to the host + * + * |31 28|27|26|25|24|23 16| 15 |14 11|10 8|7 0| + * |-------------------------------------------------------------| + * header: |rsvd |A2|TP|A1|A0| num | t_i| tid |status| msg_type | + * |-------------------------------------------------------------| + * payload: | MSDU1 ID | MSDU0 ID | + * |-------------------------------------------------------------| + * : MSDU3 ID : MSDU2 ID : + * |-------------------------------------------------------------| + * | struct htt_tx_compl_ind_append_retries | + * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| + * | struct htt_tx_compl_ind_append_tx_tstamp | + * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| + * | MSDU1 ACK RSSI | MSDU0 ACK RSSI | + * |-------------------------------------------------------------| + * : MSDU3 ACK RSSI : MSDU2 ACK RSSI : + * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| + * -msg_type + * Bits 7:0 + * Purpose: identifies this as HTT TX completion indication + * -status + * Bits 10:8 + * Purpose: the TX completion status of payload fragmentations descriptors + * Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD + * -tid + * Bits 14:11 + * Purpose: the tid associated with those fragmentation descriptors. It is + * valid or not, depending on the tid_invalid bit. + * Value: 0 to 15 + * -tid_invalid + * Bits 15:15 + * Purpose: this bit indicates whether the tid field is valid or not + * Value: 0 indicates valid, 1 indicates invalid + * -num + * Bits 23:16 + * Purpose: the number of payload in this indication + * Value: 1 to 255 + * -A0 = append + * Bits 24:24 + * Purpose: append the struct htt_tx_compl_ind_append_retries which contains + * the number of tx retries for one MSDU at the end of this message + * Value: 0 indicates no appending, 1 indicates appending + * -A1 = append1 + * Bits 25:25 + * Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which + * contains the timestamp info for each TX msdu id in payload. + * Value: 0 indicates no appending, 1 indicates appending + * -TP = MSDU tx power presence + * Bits 26:26 + * Purpose: Indicate whether the TX_COMPL_IND includes a tx power report + * for each MSDU referenced by the TX_COMPL_IND message. + * The order of the per-MSDU tx power reports matches the order + * of the MSDU IDs. + * Value: 0 indicates not appending, 1 indicates appending + * -A2 = append2 + * Bits 27:27 + * Purpose: Indicate whether data ACK RSSI is appended for each MSDU in + * TX_COMP_IND message. 
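The append-bit layout drawn in the TX completion diagram above is easiest to follow with the shifts written out. An illustrative decode, with masks transcribed from the diagram (the driver itself reads these fields through struct htt_data_tx_completion):

	static void htt_tx_compl_hdr_dump(u32 hdr)
	{
		u8 msg_type = hdr & 0xff;		/* bits 7:0 */
		u8 status = (hdr >> 8) & 0x7;		/* bits 10:8 */
		u8 tid = (hdr >> 11) & 0xf;		/* bits 14:11 */
		bool tid_invalid = hdr & BIT(15);	/* t_i */
		u8 num_msdus = (hdr >> 16) & 0xff;	/* bits 23:16 */
		bool append = hdr & BIT(24);		/* A0: retries */
		bool append1 = hdr & BIT(25);		/* A1: timestamps */
		bool tx_power = hdr & BIT(26);		/* TP: tx power */
		bool append2 = hdr & BIT(27);		/* A2: ACK RSSI */

		pr_debug("type %u status %u tid %u (valid %d) num %u a0 %d a1 %d tp %d a2 %d\n",
			 msg_type, status, tid, !tid_invalid, num_msdus,
			 append, append1, tx_power, append2);
	}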
The order of the per-MSDU ACK RSSI report + * matches the order of the MSDU IDs. + * The ACK RSSI values are valid when status is COMPLETE_OK (and + * this append2 bit is set). + * Value: 0 indicates not appending, 1 indicates appending + */ + struct htt_data_tx_completion { union { u8 flags; @@ -598,8 +986,23 @@ struct htt_data_tx_completion { } __packed; } __packed; u8 num_msdus; - u8 rsvd0; - __le16 msdus[0]; /* variable length based on %num_msdus */ + u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */ + __le16 msdus[]; /* variable length based on %num_msdus */ +} __packed; + +#define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK GENMASK(15, 0) +#define HTT_TX_PPDU_DUR_INFO0_TID_MASK GENMASK(20, 16) + +struct htt_data_tx_ppdu_dur { + __le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */ + __le32 tx_duration; /* in usecs */ +} __packed; + +#define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK GENMASK(7, 0) + +struct htt_data_tx_compl_ppdu_dur { + __le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */ + struct htt_data_tx_ppdu_dur ppdu_dur[]; } __packed; struct htt_tx_compl_ind_base { @@ -625,7 +1028,7 @@ struct htt_rc_update { u8 addr[6]; u8 num_elems; u8 rsvd0; - struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */ + struct htt_rc_tx_done_params params[]; /* variable length %num_elems */ } __packed; /* see htt_rx_indication for similar fields and descriptions */ @@ -642,9 +1045,14 @@ struct htt_rx_fragment_indication { __le16 fw_rx_desc_bytes; __le16 rsvd0; - u8 fw_msdu_rx_desc[0]; + u8 fw_msdu_rx_desc[]; } __packed; +#define ATH10K_IEEE80211_EXTIV BIT(5) +#define ATH10K_IEEE80211_TKIP_MICLEN 8 /* trailing MIC */ + +#define HTT_RX_FRAG_IND_INFO0_HEADER_LEN 16 + #define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F #define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0 #define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20 @@ -655,6 +1063,65 @@ struct htt_rx_fragment_indication { #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6 +struct htt_rx_pn_ind { + __le16 peer_id; + u8 tid; + u8 seqno_start; + u8 seqno_end; + u8 pn_ie_count; + u8 reserved; + u8 pn_ies[]; +} __packed; + +struct htt_rx_offload_msdu { + __le16 msdu_len; + __le16 peer_id; + u8 vdev_id; + u8 tid; + u8 fw_desc; + u8 payload[]; +} __packed; + +struct htt_rx_offload_ind { + u8 reserved; + __le16 msdu_count; +} __packed; + +struct htt_rx_in_ord_msdu_desc { + __le32 msdu_paddr; + __le16 msdu_len; + u8 fw_desc; + u8 reserved; +} __packed; + +struct htt_rx_in_ord_msdu_desc_ext { + __le64 msdu_paddr; + __le16 msdu_len; + u8 fw_desc; + u8 reserved; +} __packed; + +struct htt_rx_in_ord_ind { + u8 info; + __le16 peer_id; + u8 vdev_id; + u8 reserved; + __le16 msdu_count; + union { + DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc, + msdu_descs32); + DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc_ext, + msdu_descs64); + } __packed; +} __packed; + +#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f +#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0 +#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020 +#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5 +#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040 +#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6 + /* * target -> host test message definition * @@ -687,7 +1154,7 @@ struct htt_rx_fragment_indication { * Purpose: indicate how many 32-bit integers follow the message header * - NUM_CHARS * Bits 31:16 - * Purpose: indicate how many 8-bit charaters follow the series of integers + * Purpose: indicate how many 8-bit characters follow the series of integers */ struct 
htt_rx_test { u8 num_ints; @@ -695,8 +1162,9 @@ struct htt_rx_test { /* payload consists of 2 lists: * a) num_ints * sizeof(__le32) - * b) num_chars * sizeof(u8) aligned to 4bytes */ - u8 payload[0]; + * b) num_chars * sizeof(u8) aligned to 4bytes + */ + u8 payload[]; } __packed; static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test) @@ -730,7 +1198,7 @@ static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test) */ struct htt_pktlog_msg { u8 pad[3]; - __le32 payload[1 /* or more */]; + u8 payload[]; } __packed; struct htt_dbg_stats_rx_reorder_stats { @@ -811,8 +1279,8 @@ struct htt_dbg_stats_wal_tx_stats { /* Num PPDUs cleaned up in TX abort */ __le32 tx_abort; - /* Num MPDUs requed by SW */ - __le32 mpdus_requed; + /* Num MPDUs requeued by SW */ + __le32 mpdus_requeued; /* excessive retries */ __le32 tx_ko; @@ -829,10 +1297,10 @@ struct htt_dbg_stats_wal_tx_stats { /* illegal rate phy errors */ __le32 illgl_rate_phy_err; - /* wal pdev continous xretry */ + /* wal pdev continuous xretry */ __le32 pdev_cont_xretry; - /* wal pdev continous xretry */ + /* wal pdev continuous xretry */ __le32 pdev_tx_timeout; /* wal pdev resets */ @@ -929,114 +1397,6 @@ enum htt_dbg_stats_status { }; /* - * target -> host statistics upload - * - * The following field definitions describe the format of the HTT target - * to host stats upload confirmation message. - * The message contains a cookie echoed from the HTT host->target stats - * upload request, which identifies which request the confirmation is - * for, and a series of tag-length-value stats information elements. - * The tag-length header for each stats info element also includes a - * status field, to indicate whether the request for the stat type in - * question was fully met, partially met, unable to be met, or invalid - * (if the stat type in question is disabled in the target). - * A special value of all 1's in this status field is used to indicate - * the end of the series of stats info elements. - * - * - * |31 16|15 8|7 5|4 0| - * |------------------------------------------------------------| - * | reserved | msg type | - * |------------------------------------------------------------| - * | cookie LSBs | - * |------------------------------------------------------------| - * | cookie MSBs | - * |------------------------------------------------------------| - * | stats entry length | reserved | S |stat type| - * |------------------------------------------------------------| - * | | - * | type-specific stats info | - * | | - * |------------------------------------------------------------| - * | stats entry length | reserved | S |stat type| - * |------------------------------------------------------------| - * | | - * | type-specific stats info | - * | | - * |------------------------------------------------------------| - * | n/a | reserved | 111 | n/a | - * |------------------------------------------------------------| - * Header fields: - * - MSG_TYPE - * Bits 7:0 - * Purpose: identifies this is a statistics upload confirmation message - * Value: 0x9 - * - COOKIE_LSBS - * Bits 31:0 - * Purpose: Provide a mechanism to match a target->host stats confirmation - * message with its preceding host->target stats request message. - * Value: LSBs of the opaque cookie specified by the host-side requestor - * - COOKIE_MSBS - * Bits 31:0 - * Purpose: Provide a mechanism to match a target->host stats confirmation - * message with its preceding host->target stats request message. 
- * Value: MSBs of the opaque cookie specified by the host-side requestor - * - * Stats Information Element tag-length header fields: - * - STAT_TYPE - * Bits 4:0 - * Purpose: identifies the type of statistics info held in the - * following information element - * Value: htt_dbg_stats_type - * - STATUS - * Bits 7:5 - * Purpose: indicate whether the requested stats are present - * Value: htt_dbg_stats_status, including a special value (0x7) to mark - * the completion of the stats entry series - * - LENGTH - * Bits 31:16 - * Purpose: indicate the stats information size - * Value: This field specifies the number of bytes of stats information - * that follows the element tag-length header. - * It is expected but not required that this length is a multiple of - * 4 bytes. Even if the length is not an integer multiple of 4, the - * subsequent stats entry header will begin on a 4-byte aligned - * boundary. - */ - -#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F -#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0 -#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0 -#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5 - -struct htt_stats_conf_item { - union { - u8 info; - struct { - u8 stat_type:5; /* %HTT_DBG_STATS_ */ - u8 status:3; /* %HTT_DBG_STATS_STATUS_ */ - } __packed; - } __packed; - u8 pad; - __le16 length; - u8 payload[0]; /* roundup(length, 4) long */ -} __packed; - -struct htt_stats_conf { - u8 pad[3]; - __le32 cookie_lsb; - __le32 cookie_msb; - - /* each item has variable length! */ - struct htt_stats_conf_item items[0]; -} __packed; - -static inline struct htt_stats_conf_item *htt_stats_conf_next_item( - const struct htt_stats_conf_item *item) -{ - return (void *)item + sizeof(*item) + roundup(item->length, 4); -} -/* * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank * * The following field definitions describe the format of the HTT host @@ -1091,19 +1451,225 @@ struct htt_frag_desc_bank_id { } __packed; /* real is 16 but it wouldn't fit in the max htt message size - * so we use a conservatively safe value for now */ + * so we use a conservatively safe value for now + */ #define HTT_FRAG_DESC_BANK_MAX 4 -#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 -#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 -#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2) +#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 +#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 +#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2) +#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3) +#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4) +#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4 + +enum htt_q_depth_type { + HTT_Q_DEPTH_TYPE_BYTES = 0, + HTT_Q_DEPTH_TYPE_MSDUS = 1, +}; + +#define HTT_TX_Q_STATE_NUM_PEERS (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \ + TARGET_10_4_NUM_VDEVS) +#define HTT_TX_Q_STATE_NUM_TIDS 8 +#define HTT_TX_Q_STATE_ENTRY_SIZE 1 +#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0 + +/** + * struct htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config + * + * Defines host q state format and behavior. See htt_q_state. + * + * @paddr: Queue physical address + * @num_peers: Number of supported peers + * @num_tids: Number of supported TIDs + * @record_size: Defines the size of each host q entry in bytes. In practice + * however firmware (at least 10.4.3-00191) ignores this host + * configuration value and uses hardcoded value of 1. + * @record_multiplier: This is valid only when q depth type is MSDUs. 
It + * defines the exponent for the power of 2 multiplication. + * @pad: struct padding for 32-bit alignment + */ +struct htt_q_state_conf { + __le32 paddr; + __le16 num_peers; + __le16 num_tids; + u8 record_size; + u8 record_multiplier; + u8 pad[2]; +} __packed; -struct htt_frag_desc_bank_cfg { +struct htt_frag_desc_bank_cfg32 { u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ u8 num_banks; u8 desc_size; __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; + struct htt_q_state_conf q_state; +} __packed; + +struct htt_frag_desc_bank_cfg64 { + u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ + u8 num_banks; + u8 desc_size; + __le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; + struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; + struct htt_q_state_conf q_state; +} __packed; + +#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128 +#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f +#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0 +#define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0 +#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6 + +/** + * struct htt_q_state - shared between host and firmware via DMA + * + * This structure is used for the host to expose its software queue state to + * firmware so that its rate control can schedule fetch requests for optimized + * performance. This is most notably used for MU-MIMO aggregation when multiple + * MU clients are connected. + * + * @count: Each element defines the host queue depth. When q depth type was + * configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as: + * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and + * HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as + * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 ** + * record_multiplier (see htt_q_state_conf). + * @map: Used by firmware to quickly check which host queues are not empty. It + * is a bitmap, one bit per queue, saying whether the queue is non-empty. + * @seq: Used by firmware to quickly check if the host queues were updated + * since it last checked. + * + * FIXME: Is the q_state map[] size calculation really correct?
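The FACTOR/EXP encoding documented for @count above is compact; a hypothetical decode helper makes it concrete (the driver performs the inverse, packing host queue depths into this byte):

	/* Hypothetical decode of one HTT_Q_DEPTH_TYPE_BYTES entry:
	 * depth = FACTOR * 128 * 8^EXP, where 8^EXP is << (3 * EXP).
	 */
	static u32 htt_q_depth_bytes(u8 entry)
	{
		u32 factor = (entry & HTT_TX_Q_STATE_ENTRY_FACTOR_MASK) >>
			     HTT_TX_Q_STATE_ENTRY_FACTOR_LSB;
		u32 exp = (entry & HTT_TX_Q_STATE_ENTRY_EXP_MASK) >>
			  HTT_TX_Q_STATE_ENTRY_EXP_LSB;

		return (factor * HTT_TX_Q_STATE_ENTRY_COEFFICIENT) << (3 * exp);
	}

For example, an entry of 0xff decodes to 63 * 128 * 8^3 = 4128768 bytes.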
+ */ +struct htt_q_state { + u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS]; + u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32]; + __le32 seq; +} __packed; + +#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff +#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0 +#define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000 +#define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12 + +struct htt_tx_fetch_record { + __le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */ + __le16 num_msdus; + __le32 num_bytes; +} __packed; + +struct htt_tx_fetch_ind { + u8 pad0; + __le16 fetch_seq_num; + __le32 token; + __le16 num_resp_ids; + __le16 num_records; + union { + /* ath10k_htt_get_tx_fetch_ind_resp_ids() */ + DECLARE_FLEX_ARRAY(__le32, resp_ids); + DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records); + } __packed; +} __packed; + +static inline void * +ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind) +{ + return (void *)&ind->records[le16_to_cpu(ind->num_records)]; +} + +struct htt_tx_fetch_resp { + u8 pad0; + __le16 resp_id; + __le16 fetch_seq_num; + __le16 num_records; + __le32 token; + struct htt_tx_fetch_record records[]; +} __packed; + +struct htt_tx_fetch_confirm { + u8 pad0; + __le16 num_resp_ids; + __le32 resp_ids[]; +} __packed; + +enum htt_tx_mode_switch_mode { + HTT_TX_MODE_SWITCH_PUSH = 0, + HTT_TX_MODE_SWITCH_PUSH_PULL = 1, +}; + +#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0) +#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe +#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1 + +#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003 +#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0 +#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc +#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2 + +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0 +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000 +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12 + +struct htt_tx_mode_switch_record { + __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */ + __le16 num_max_msdus; +} __packed; + +struct htt_tx_mode_switch_ind { + u8 pad0; + __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */ + __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */ + u8 pad1[2]; + struct htt_tx_mode_switch_record records[]; +} __packed; + +struct htt_channel_change { + u8 pad[3]; + __le32 freq; + __le32 center_freq1; + __le32 center_freq2; + __le32 phymode; +} __packed; + +struct htt_per_peer_tx_stats_ind { + __le32 succ_bytes; + __le32 retry_bytes; + __le32 failed_bytes; + u8 ratecode; + u8 flags; + __le16 peer_id; + __le16 succ_pkts; + __le16 retry_pkts; + __le16 failed_pkts; + __le16 tx_duration; + __le32 reserved1; + __le32 reserved2; +} __packed; + +struct htt_peer_tx_stats { + u8 num_ppdu; + u8 ppdu_len; + u8 version; + u8 payload[]; +} __packed; + +#define ATH10K_10_2_TX_STATS_OFFSET 136 +#define PEER_STATS_FOR_NO_OF_PPDUS 4 + +struct ath10k_10_2_peer_tx_stats { + u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS]; + u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; + __le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; + u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; + __le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; + u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; + __le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; + u8 flags[PEER_STATS_FOR_NO_OF_PPDUS]; + __le32 tx_duration; + u8 tx_ppdu_cnt; + u8 peer_id; } __packed; union htt_rx_pn_t { @@ -1111,10 +1677,10 @@ union htt_rx_pn_t { u32 pn24; /* TKIP or CCMP: 48-bit PN */ - u_int64_t pn48; 
+ u64 pn48; /* WAPI: 128-bit PN */ - u_int64_t pn128[2]; + u64 pn128[2]; }; struct htt_cmd { @@ -1123,11 +1689,15 @@ struct htt_cmd { struct htt_ver_req ver_req; struct htt_mgmt_tx_desc mgmt_tx; struct htt_data_tx_desc data_tx; - struct htt_rx_ring_setup rx_setup; + struct htt_rx_ring_setup_32 rx_setup_32; + struct htt_rx_ring_setup_64 rx_setup_64; struct htt_stats_req stats_req; struct htt_oob_sync_req oob_sync_req; struct htt_aggr_conf aggr_conf; - struct htt_frag_desc_bank_cfg frag_desc_bank_cfg; + struct htt_aggr_conf_v2 aggr_conf_v2; + struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32; + struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64; + struct htt_tx_fetch_resp tx_fetch_resp; }; } __packed; @@ -1138,6 +1708,7 @@ struct htt_resp { struct htt_mgmt_tx_completion mgmt_tx_completion; struct htt_data_tx_completion data_tx_completion; struct htt_rx_indication rx_ind; + struct htt_rx_indication_hl rx_ind_hl; struct htt_rx_fragment_indication rx_frag_ind; struct htt_rx_peer_map peer_map; struct htt_rx_peer_unmap peer_unmap; @@ -1148,17 +1719,30 @@ struct htt_resp { struct htt_rc_update rc_update; struct htt_rx_test rx_test; struct htt_pktlog_msg pktlog_msg; - struct htt_stats_conf stats_conf; - }; + struct htt_rx_pn_ind rx_pn_ind; + struct htt_rx_offload_ind rx_offload_ind; + struct htt_rx_in_ord_ind rx_in_ord_ind; + struct htt_tx_fetch_ind tx_fetch_ind; + struct htt_tx_fetch_confirm tx_fetch_confirm; + struct htt_tx_mode_switch_ind tx_mode_switch_ind; + struct htt_channel_change chan_change; + struct htt_peer_tx_stats peer_tx_stats; + } __packed; } __packed; - /*** host side structures follow ***/ struct htt_tx_done { - u32 msdu_id; - bool discard; - bool no_ack; + u16 msdu_id; + u16 status; + u8 ack_rssi; +}; + +enum htt_tx_compl_state { + HTT_TX_COMPL_STATE_NONE, + HTT_TX_COMPL_STATE_ACK, + HTT_TX_COMPL_STATE_NOACK, + HTT_TX_COMPL_STATE_DISCARD, }; struct htt_peer_map_event { @@ -1171,27 +1755,34 @@ struct htt_peer_unmap_event { u16 peer_id; }; -struct htt_rx_info { - struct sk_buff *skb; - enum htt_rx_mpdu_status status; - enum htt_rx_mpdu_encrypt_type encrypt_type; - s8 signal; - struct { - u8 info0; - u32 info1; - u32 info2; - } rate; - bool fcs_err; -}; +struct ath10k_htt_txbuf_32 { + struct htt_data_tx_desc_frag frags[2]; + struct ath10k_htc_hdr htc_hdr; + struct htt_cmd_hdr cmd_hdr; + struct htt_data_tx_desc cmd_tx; +} __packed __aligned(4); + +struct ath10k_htt_txbuf_64 { + struct htt_data_tx_desc_frag frags[2]; + struct ath10k_htc_hdr htc_hdr; + struct htt_cmd_hdr cmd_hdr; + struct htt_data_tx_desc_64 cmd_tx; +} __packed __aligned(4); struct ath10k_htt { struct ath10k *ar; enum ath10k_htc_ep_id eid; - int max_throughput_mbps; + struct sk_buff_head rx_indication_head; + u8 target_version_major; u8 target_version_minor; struct completion target_version_received; + u8 max_num_amsdu; + u8 max_num_ampdu; + + const enum htt_t2h_msg_type *t2h_msg_types; + u32 t2h_msg_types_max; struct { /* @@ -1204,13 +1795,30 @@ struct ath10k_htt { * filled. */ struct sk_buff **netbufs_ring; + + /* This is used only with firmware supporting IN_ORD_IND. + * + * With Full Rx Reorder the HTT Rx Ring is more of a temporary + * buffer ring from which buffer addresses are copied by the + * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND + * pointing to specific (re-ordered) buffers. + * + * FIXME: With kernel generic hashing functions there's a lot + * of hash collisions for sk_buffs. 
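The FIXME above refers to the paddr -> sk_buff lookup needed when firmware delivers RX_IN_ORD_PADDR_IND entries. A sketch of that lookup, assuming the rxcb bookkeeping from core.h (ath10k_skb_rxcb, ATH10K_RXCB_SKB); simplified from the real htt_rx.c helper:

	static struct sk_buff *example_find_skb_paddr(struct ath10k *ar,
						      dma_addr_t paddr)
	{
		struct ath10k_skb_rxcb *rxcb;

		/* hash key is the DMA address the buffer was mapped at */
		hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb,
				       hlist, paddr)
			if (rxcb->paddr == paddr)
				return ATH10K_RXCB_SKB(rxcb);

		return NULL;
	}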
+ */ + bool in_ord_rx; + DECLARE_HASHTABLE(skb_table, 4); + /* * Ring of buffer addresses - * This ring holds the "physical" device address of the * rx buffers the host SW provides for the MAC HW to * fill. */ - __le32 *paddrs_ring; + union { + __le64 *paddrs_ring_64; + __le32 *paddrs_ring_32; + }; /* * Base address of ring, as a "physical" device address @@ -1222,7 +1830,7 @@ struct ath10k_htt { int size; /* size - 1 */ - unsigned size_mask; + unsigned int size_mask; /* how many rx buffers to keep in the ring */ int fill_level; @@ -1243,7 +1851,7 @@ struct ath10k_htt { /* where HTT SW has processed bufs filled by rx MAC DMA */ struct { - unsigned msdu_payld; + unsigned int msdu_payld; } sw_rd_idx; /* @@ -1258,31 +1866,246 @@ struct ath10k_htt { unsigned int prefetch_len; - /* Protects access to %pending_tx, %used_msdu_ids */ + /* Protects access to pending_tx, num_pending_tx */ spinlock_t tx_lock; int max_num_pending_tx; int num_pending_tx; - struct sk_buff **pending_tx; - unsigned long *used_msdu_ids; /* bitmap */ + int num_pending_mgmt_tx; + struct idr pending_tx; wait_queue_head_t empty_tx_wq; + /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */ + DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done); + /* set if host-fw communication goes haywire - * used to avoid further failures */ + * used to avoid further failures + */ bool rx_confused; + atomic_t num_mpdus_ready; + + /* This is used to group tx/rx completions separately and process them + * in batches to reduce cache stalls + */ + struct sk_buff_head rx_msdus_q; + struct sk_buff_head rx_in_ord_compl_q; + struct sk_buff_head tx_fetch_ind_q; + + /* rx_status template */ + struct ieee80211_rx_status rx_status; + + struct { + dma_addr_t paddr; + union { + struct htt_msdu_ext_desc *vaddr_desc_32; + struct htt_msdu_ext_desc_64 *vaddr_desc_64; + }; + size_t size; + } frag_desc; + + struct { + dma_addr_t paddr; + union { + struct ath10k_htt_txbuf_32 *vaddr_txbuff_32; + struct ath10k_htt_txbuf_64 *vaddr_txbuff_64; + }; + size_t size; + } txbuf; + + struct { + bool enabled; + struct htt_q_state *vaddr; + dma_addr_t paddr; + u16 num_push_allowed; + u16 num_peers; + u16 num_tids; + enum htt_tx_mode_switch_mode mode; + enum htt_q_depth_type type; + } tx_q_state; + + bool tx_mem_allocated; + const struct ath10k_htt_tx_ops *tx_ops; + const struct ath10k_htt_rx_ops *rx_ops; + bool disable_tx_comp; + bool bundle_tx; + struct sk_buff_head tx_req_head; + struct sk_buff_head tx_complete_head; }; +struct ath10k_htt_tx_ops { + int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt); + int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt); + int (*htt_alloc_frag_desc)(struct ath10k_htt *htt); + void (*htt_free_frag_desc)(struct ath10k_htt *htt); + int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu); + int (*htt_alloc_txbuff)(struct ath10k_htt *htt); + void (*htt_free_txbuff)(struct ath10k_htt *htt); + int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt, + u8 max_subfrms_ampdu, + u8 max_subfrms_amsdu); + void (*htt_flush_tx)(struct ath10k_htt *htt); +}; + +static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_send_rx_ring_cfg) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_send_rx_ring_cfg(htt); +} + +static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_send_frag_desc_bank_cfg) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt); +} + +static inline int 
ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_alloc_frag_desc) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_alloc_frag_desc(htt); +} + +static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt) +{ + if (htt->tx_ops->htt_free_frag_desc) + htt->tx_ops->htt_free_frag_desc(htt); +} + +static inline int ath10k_htt_tx(struct ath10k_htt *htt, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) +{ + return htt->tx_ops->htt_tx(htt, txmode, msdu); +} + +static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt) +{ + if (htt->tx_ops->htt_flush_tx) + htt->tx_ops->htt_flush_tx(htt); +} + +static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_alloc_txbuff) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_alloc_txbuff(htt); +} + +static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt) +{ + if (htt->tx_ops->htt_free_txbuff) + htt->tx_ops->htt_free_txbuff(htt); +} + +static inline int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, + u8 max_subfrms_ampdu, + u8 max_subfrms_amsdu) + +{ + if (!htt->tx_ops->htt_h2t_aggr_cfg_msg) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_h2t_aggr_cfg_msg(htt, + max_subfrms_ampdu, + max_subfrms_amsdu); +} + +struct ath10k_htt_rx_ops { + size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt); + void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr); + void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr, + int idx); + void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt); + void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx); + bool (*htt_rx_proc_rx_frag_ind)(struct ath10k_htt *htt, + struct htt_rx_fragment_indication *rx, + struct sk_buff *skb); +}; + +static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt) +{ + if (!htt->rx_ops->htt_get_rx_ring_size) + return 0; + + return htt->rx_ops->htt_get_rx_ring_size(htt); +} + +static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt, + void *vaddr) +{ + if (htt->rx_ops->htt_config_paddrs_ring) + htt->rx_ops->htt_config_paddrs_ring(htt, vaddr); +} + +static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt, + dma_addr_t paddr, + int idx) +{ + if (htt->rx_ops->htt_set_paddrs_ring) + htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx); +} + +static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt) +{ + if (!htt->rx_ops->htt_get_vaddr_ring) + return NULL; + + return htt->rx_ops->htt_get_vaddr_ring(htt); +} + +static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx) +{ + if (htt->rx_ops->htt_reset_paddrs_ring) + htt->rx_ops->htt_reset_paddrs_ring(htt, idx); +} + +static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt, + struct htt_rx_fragment_indication *rx, + struct sk_buff *skb) +{ + if (!htt->rx_ops->htt_rx_proc_rx_frag_ind) + return true; + + return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb); +} + +/* the driver strongly assumes that the rx header status be 64 bytes long, + * so all possible rx_desc structures must respect this assumption. + */ #define RX_HTT_HDR_STATUS_LEN 64 -/* This structure layout is programmed via rx ring setup +/* The rx descriptor structure layout is programmed via rx ring setup * so that FW knows how to transfer the rx descriptor to the host. - * Buffers like this are placed on the rx ring. 
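For reference, a wired-up instance of the rx_ops table might look like this for the 64-bit paddr ring variant (function names are illustrative; the actual tables are defined in htt_rx.c):

	static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
		.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
		.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
		.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
		.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
		.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
		/* .htt_rx_proc_rx_frag_ind left NULL: the guarded wrapper
		 * above then defaults to returning true
		 */
	};

Ops left unset fall back to the wrappers' defaults, which is what makes partially populated tables safe.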
*/ + * Unfortunately, though, QCA6174's firmware doesn't currently behave correctly + * when modifying the structure layout of the rx descriptor beyond what it expects + * (even if it correctly programmed during the rx ring setup). + * Therefore we must keep two different memory layouts, abstract the rx descriptor + * representation and use ath10k_rx_desc_ops + * for correctly accessing rx descriptor data. + */ + +/* base struct used for abstracting the rx descriptor representation */ struct htt_rx_desc { union { /* This field is filled on the host using the msdu buffer - * from htt_rx_indication */ + * from htt_rx_indication + */ struct fw_rx_desc_base fw_desc; u32 pad; } __packed; +} __packed; + +/* rx descriptor for wcn3990 and possibly extensible for newer cards + * Buffers like this are placed on the rx ring. + */ +struct htt_rx_desc_v2 { + struct htt_rx_desc base; struct { struct rx_attention attention; struct rx_frag_info frag_info; @@ -1294,9 +2117,266 @@ struct htt_rx_desc { struct rx_ppdu_end ppdu_end; } __packed; u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN]; - u8 msdu_payload[0]; + u8 msdu_payload[]; +}; + +/* QCA6174, QCA988x, QCA99x0 dedicated rx descriptor to make sure their firmware + * works correctly. We keep a single rx descriptor for all these three + * families of cards because from tests it seems to be the most stable solution, + * e.g. having a rx descriptor only for QCA6174 seldom caused firmware crashes + * during some tests. + * Buffers like this are placed on the rx ring. + */ +struct htt_rx_desc_v1 { + struct htt_rx_desc base; + struct { + struct rx_attention attention; + struct rx_frag_info_v1 frag_info; + struct rx_mpdu_start mpdu_start; + struct rx_msdu_start_v1 msdu_start; + struct rx_msdu_end_v1 msdu_end; + struct rx_mpdu_end mpdu_end; + struct rx_ppdu_start ppdu_start; + struct rx_ppdu_end_v1 ppdu_end; + } __packed; + u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN]; + u8 msdu_payload[]; +}; + +/* rx_desc abstraction */ +struct ath10k_htt_rx_desc_ops { + /* These fields are mandatory, they must be specified in any instance */ + + /* sizeof() of the rx_desc structure used by this hw */ + size_t rx_desc_size; + + /* offset of msdu_payload inside the rx_desc structure used by this hw */ + size_t rx_desc_msdu_payload_offset; + + /* These fields are options. 
+ * When a field is not provided the default implementation gets used + * (see the ath10k_rx_desc_* operations below for more info about the defaults) + */ + bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd); + int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd); + + /* Safely cast from a void* buffer containing an rx descriptor + * to the proper rx_desc structure + */ + struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff); + + void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs); + struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd); + struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd); + struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd); + struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd); + struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd); + struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd); + struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd); + struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd); + u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd); + u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd); +}; + +extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops; +extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops; +extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops; + +static inline int +ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes) + return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd); + return 0; +} + +static inline bool +ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error) + return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd); + return false; +} + +/* The default implementation of all these getters is using the old rx_desc, + * so that it is easier to define the ath10k_htt_rx_desc_ops instances. + * But probably, if new wireless cards must be supported, it would be better + * to switch the default implementation to the new rx_desc, since this would + * make the extension easier . 
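Given those defaults, a hypothetical new chip that reuses the v1 layout only has to fill in the two mandatory fields; every getter then falls through to the v1-based defaults (a minimal per-chip table along these lines is what the simplest variants need):

	static const struct ath10k_htt_rx_desc_ops example_rx_desc_ops = {
		/* mandatory fields only; all getters use the defaults */
		.rx_desc_size = sizeof(struct htt_rx_desc_v1),
		.rx_desc_msdu_payload_offset =
			offsetof(struct htt_rx_desc_v1, msdu_payload),
	};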
+ */ +static inline struct htt_rx_desc * +ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff) +{ + if (hw->rx_desc_ops->rx_desc_from_raw_buffer) + return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff); + return &((struct htt_rx_desc_v1 *)buff)->base; +} + +static inline void +ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw, + struct htt_rx_ring_rx_desc_offsets *off) +{ + if (hw->rx_desc_ops->rx_desc_get_offsets) { + hw->rx_desc_ops->rx_desc_get_offsets(off); + } else { +#define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4) + off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); + off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); + off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); + off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); + off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); + off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); + off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); + off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); + off->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); + off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); +#undef desc_offset + } +} + +static inline struct rx_attention * +ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc; + + if (hw->rx_desc_ops->rx_desc_get_attention) + return hw->rx_desc_ops->rx_desc_get_attention(rxd); + + rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); + return &rx_desc->attention; +} + +static inline struct rx_frag_info_common * +ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc; + + if (hw->rx_desc_ops->rx_desc_get_frag_info) + return hw->rx_desc_ops->rx_desc_get_frag_info(rxd); + + rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); + return &rx_desc->frag_info.common; +} + +static inline struct rx_mpdu_start * +ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc; + + if (hw->rx_desc_ops->rx_desc_get_mpdu_start) + return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd); + + rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); + return &rx_desc->mpdu_start; +} + +static inline struct rx_mpdu_end * +ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc; + + if (hw->rx_desc_ops->rx_desc_get_mpdu_end) + return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd); + + rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); + return &rx_desc->mpdu_end; +} + +static inline struct rx_msdu_start_common * +ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc; + + if (hw->rx_desc_ops->rx_desc_get_msdu_start) + return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd); + + rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); + return &rx_desc->msdu_start.common; +} + +static inline struct rx_msdu_end_common * +ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc; + + if (hw->rx_desc_ops->rx_desc_get_msdu_end) + return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd); + + rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); + return &rx_desc->msdu_end.common; +} + +static inline struct rx_ppdu_start * 
+ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc; + + if (hw->rx_desc_ops->rx_desc_get_ppdu_start) + return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd); + + rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); + return &rx_desc->ppdu_start; +} + +static inline struct rx_ppdu_end_common * +ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc; + + if (hw->rx_desc_ops->rx_desc_get_ppdu_end) + return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd); + + rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); + return &rx_desc->ppdu_end.common; +} + +static inline u8 * +ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc; + + if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status) + return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd); + + rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); + return rx_desc->rx_hdr_status; +} + +static inline u8 * +ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) +{ + struct htt_rx_desc_v1 *rx_desc; + + if (hw->rx_desc_ops->rx_desc_get_msdu_payload) + return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd); + + rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); + return rx_desc->msdu_payload; +} + +#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff +#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0 +#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000 +#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12 +#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000 +#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13 +#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00010000 +#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 16 +#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000 +#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17 + +struct htt_rx_desc_base_hl { + __le32 info; /* HTT_RX_DESC_HL_INFO_ */ }; +struct htt_rx_chan_info { + __le16 primary_chan_center_freq_mhz; + __le16 contig_chan1_center_freq_mhz; + __le16 contig_chan2_center_freq_mhz; + u8 phy_mode; + u8 reserved; +} __packed; + #define HTT_RX_DESC_ALIGN 8 #define HTT_MAC_ADDR_LEN 6 @@ -1306,8 +2386,20 @@ struct htt_rx_desc { * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size, * rounded up to a cache line size. */ -#define HTT_RX_BUF_SIZE 1920 -#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc)) +#define HTT_RX_BUF_SIZE 2048 + +/* The HTT_RX_MSDU_SIZE can't be statically computed anymore, + * because it depends on the underlying device rx_desc representation + */ +static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw) +{ + return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size; +} + +/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle + * aggregated traffic more nicely. + */ +#define ATH10K_HTT_MAX_NUM_REFILL 100 /* * DMA_MAP expects the buffer to be an integral number of cache lines. @@ -1317,22 +2409,57 @@ struct htt_rx_desc { #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */ #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1) -struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar); -int ath10k_htt_attach_target(struct ath10k_htt *htt); -void ath10k_htt_detach(struct ath10k_htt *htt); +/* These values are default in most firmware revisions and apparently are a + * sweet spot performance wise. 
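[Illustrative aside, not part of the patch: the two limits defined just below bound how many subframes the firmware aggregates per A-MSDU and per A-MPDU. A hedged sketch of how such defaults are typically applied before the values are handed to the firmware configuration path; the helper and its zero-means-default convention are hypothetical, not this driver's code.]

	static void apply_aggr_defaults(int *max_amsdu, int *max_ampdu)
	{
		if (*max_amsdu <= 0)
			*max_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;	/* 3 */
		if (*max_ampdu <= 0)
			*max_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;	/* 64 */
	}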
+ */ +#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3 +#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64 + +int ath10k_htt_connect(struct ath10k_htt *htt); +int ath10k_htt_init(struct ath10k *ar); +int ath10k_htt_setup(struct ath10k_htt *htt); + +int ath10k_htt_tx_start(struct ath10k_htt *htt); +void ath10k_htt_tx_stop(struct ath10k_htt *htt); +void ath10k_htt_tx_destroy(struct ath10k_htt *htt); +void ath10k_htt_tx_free(struct ath10k_htt *htt); + +int ath10k_htt_rx_alloc(struct ath10k_htt *htt); +int ath10k_htt_rx_ring_refill(struct ath10k *ar); +void ath10k_htt_rx_free(struct ath10k_htt *htt); -int ath10k_htt_tx_attach(struct ath10k_htt *htt); -void ath10k_htt_tx_detach(struct ath10k_htt *htt); -int ath10k_htt_rx_attach(struct ath10k_htt *htt); -void ath10k_htt_rx_detach(struct ath10k_htt *htt); void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); -void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); +void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); +bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); -int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt); - -void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); -int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt); +int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask, + u64 cookie); +void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb); +int ath10k_htt_tx_fetch_resp(struct ath10k *ar, + __le32 token, + __le16 fetch_seq_num, + struct htt_tx_fetch_record *records, + size_t num_records); +void ath10k_htt_op_ep_tx_credits(struct ath10k *ar); + +void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); +void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); +void ath10k_htt_tx_txq_sync(struct ath10k *ar); +void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); +int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt); +void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt); +int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, + bool is_presp); + +int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); -int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); -int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *); +int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu); +void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, + struct sk_buff *skb); +int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget); +int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget); +void ath10k_htt_set_tx_ops(struct ath10k_htt *htt); +void ath10k_htt_set_rx_ops(struct ath10k_htt *htt); #endif diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index de058d7adca8..d7e429041065 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1,132 +1,163 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include <linux/export.h> + +#include "core.h" #include "htc.h" #include "htt.h" #include "txrx.h" #include "debug.h" +#include "trace.h" +#include "mac.h" #include <linux/log2.h> +#include <linux/bitfield.h> -/* slightly larger than one large A-MPDU */ -#define HTT_RX_RING_SIZE_MIN 128 +/* when under memory pressure rx ring refill may fail and needs a retry */ +#define HTT_RX_RING_REFILL_RETRY_MS 50 -/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */ -#define HTT_RX_RING_SIZE_MAX 2048 +#define HTT_RX_RING_REFILL_RESCHED_MS 5 -#define HTT_RX_AVG_FRM_BYTES 1000 +/* shortcut to interpret a raw memory buffer as a rx descriptor */ +#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf) -/* ms, very conservative */ -#define HTT_RX_HOST_LATENCY_MAX_MS 20 +static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb); -/* ms, conservative */ -#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10 +static struct sk_buff * +ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr) +{ + struct ath10k_skb_rxcb *rxcb; -/* when under memory pressure rx ring refill may fail and needs a retry */ -#define HTT_RX_RING_REFILL_RETRY_MS 50 + hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr) + if (rxcb->paddr == paddr) + return ATH10K_RXCB_SKB(rxcb); + + WARN_ON_ONCE(1); + return NULL; +} -static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt) +static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) { - int size; + struct sk_buff *skb; + struct ath10k_skb_rxcb *rxcb; + struct hlist_node *n; + int i; - /* - * It is expected that the host CPU will typically be able to - * service the rx indication from one A-MPDU before the rx - * indication from the subsequent A-MPDU happens, roughly 1-2 ms - * later. However, the rx ring should be sized very conservatively, - * to accomodate the worst reasonable delay before the host CPU - * services a rx indication interrupt. - * - * The rx ring need not be kept full of empty buffers. In theory, - * the htt host SW can dynamically track the low-water mark in the - * rx ring, and dynamically adjust the level to which the rx ring - * is filled with empty buffers, to dynamically meet the desired - * low-water mark. - * - * In contrast, it's difficult to resize the rx ring itself, once - * it's in use. Thus, the ring itself should be sized very - * conservatively, while the degree to which the ring is filled - * with empty buffers should be sized moderately conservatively. 
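[Illustrative aside, not part of the patch: one worked example of the sizing formula this removed comment justified, before it disappears. The computation removed just below intends

	size = max_throughput_mbps * 1000 / (8 * HTT_RX_AVG_FRM_BYTES) *
	       HTT_RX_HOST_LATENCY_MAX_MS;

i.e. frames per millisecond times the worst-case host latency. At 1000 Mbps with 1000-byte average frames and a 20 ms worst case: 1000 * 1000 / 8000 * 20 = 2500 buffers, clamped to HTT_RX_RING_SIZE_MAX (2048), already a power of two. Note that the removed ath10k_htt_rx_ring_size() actually reads "max_throughput_mbps + 1000 / ..."; with integer division the second term is zero, so the old size degenerated to roughly max_throughput_mbps. The fill-level twin below multiplies, as the comment intends. Either way the point is moot once the ring becomes a fixed HTT_RX_RING_SIZE.]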
- */ + if (htt->rx_ring.in_ord_rx) { + hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) { + skb = ATH10K_RXCB_SKB(rxcb); + dma_unmap_single(htt->ar->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + hash_del(&rxcb->hlist); + dev_kfree_skb_any(skb); + } + } else { + for (i = 0; i < htt->rx_ring.size; i++) { + skb = htt->rx_ring.netbufs_ring[i]; + if (!skb) + continue; - /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ - size = - htt->max_throughput_mbps + - 1000 / - (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS; + rxcb = ATH10K_SKB_RXCB(skb); + dma_unmap_single(htt->ar->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } + } - if (size < HTT_RX_RING_SIZE_MIN) - size = HTT_RX_RING_SIZE_MIN; + htt->rx_ring.fill_cnt = 0; + hash_init(htt->rx_ring.skb_table); + memset(htt->rx_ring.netbufs_ring, 0, + htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0])); +} - if (size > HTT_RX_RING_SIZE_MAX) - size = HTT_RX_RING_SIZE_MAX; +static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt) +{ + return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32); +} - size = roundup_pow_of_two(size); +static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt) +{ + return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64); +} - return size; +static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt, + void *vaddr) +{ + htt->rx_ring.paddrs_ring_32 = vaddr; } -static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt) +static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt, + void *vaddr) { - int size; + htt->rx_ring.paddrs_ring_64 = vaddr; +} - /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ - size = - htt->max_throughput_mbps * - 1000 / - (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS; +static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt, + dma_addr_t paddr, int idx) +{ + htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr); +} - /* - * Make sure the fill level is at least 1 less than the ring size. - * Leaving 1 element empty allows the SW to easily distinguish - * between a full ring vs. an empty ring. 
- */ - if (size >= htt->rx_ring.size) - size = htt->rx_ring.size - 1; +static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt, + dma_addr_t paddr, int idx) +{ + htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr); +} - return size; +static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx) +{ + htt->rx_ring.paddrs_ring_32[idx] = 0; } -static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) +static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx) { - struct sk_buff *skb; - struct ath10k_skb_cb *cb; - int i; + htt->rx_ring.paddrs_ring_64[idx] = 0; +} - for (i = 0; i < htt->rx_ring.fill_cnt; i++) { - skb = htt->rx_ring.netbufs_ring[i]; - cb = ATH10K_SKB_CB(skb); - dma_unmap_single(htt->ar->dev, cb->paddr, - skb->len + skb_tailroom(skb), - DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); - } +static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt) +{ + return (void *)htt->rx_ring.paddrs_ring_32; +} - htt->rx_ring.fill_cnt = 0; +static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt) +{ + return (void *)htt->rx_ring.paddrs_ring_64; } static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) { + struct ath10k_hw_params *hw = &htt->ar->hw_params; struct htt_rx_desc *rx_desc; + struct ath10k_skb_rxcb *rxcb; struct sk_buff *skb; dma_addr_t paddr; int ret = 0, idx; - idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr)); + /* The Full Rx Reorder firmware has no way of telling the host + * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring. + * To keep things simple make sure ring is always half empty. This + * guarantees there'll be no replenishment overruns possible. + */ + BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2); + + idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); + + if (idx < 0 || idx >= htt->rx_ring.size) { + ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n"); + idx &= htt->rx_ring.size_mask; + ret = -ENOMEM; + goto fail; + } + while (num > 0) { skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); if (!skb) { @@ -140,8 +171,8 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) skb->data); /* Clear rx_desc attention word before posting to Rx ring */ - rx_desc = (struct htt_rx_desc *)skb->data; - rx_desc->attention.flags = __cpu_to_le32(0); + rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data); + ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0); paddr = dma_map_single(htt->ar->dev, skb->data, skb->len + skb_tailroom(skb), @@ -153,18 +184,30 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) goto fail; } - ATH10K_SKB_CB(skb)->paddr = paddr; + rxcb = ATH10K_SKB_RXCB(skb); + rxcb->paddr = paddr; htt->rx_ring.netbufs_ring[idx] = skb; - htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); + ath10k_htt_set_paddrs_ring(htt, paddr, idx); htt->rx_ring.fill_cnt++; + if (htt->rx_ring.in_ord_rx) { + hash_add(htt->rx_ring.skb_table, + &ATH10K_SKB_RXCB(skb)->hlist, + paddr); + } + num--; idx++; idx &= htt->rx_ring.size_mask; } fail: - *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx); + /* + * Make sure the rx buffer is updated before available buffer + * index to avoid any potential rx ring corruption. 
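[Illustrative aside, not part of the patch: the ordering rule this comment introduces, reduced to its essentials; the names here are generic, not the driver's.]

	ring[idx] = cpu_to_le32(paddr);	/* 1. slot contents first     */
	mb();				/* 2. order the two stores    */
	*alloc_idx = cpu_to_le32(idx);	/* 3. publish: fw may consume */

[Without the full barrier, a weakly ordered CPU or a write buffer in front of the DMA-coherent mapping could make the index store visible to the target before the slot store, and the firmware would DMA from a stale ring entry.]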
+	 */
+	mb();
+	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
 	return ret;
 }
@@ -176,10 +219,28 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
 {
-	int ret, num_to_fill;
+	int ret, num_deficit, num_to_fill;
+	/* Refilling the whole RX ring buffer proves to be a bad idea. The
+	 * reason is RX may take up a significant amount of CPU cycles and
+	 * starve other tasks, e.g. TX on an ethernet device while acting as
+	 * a bridge with an ath10k wlan interface. This ended up with very
+	 * poor performance once the host system CPU was overwhelmed with RX
+	 * on ath10k.
+	 *
+	 * By limiting the number of refills the replenishing occurs
+	 * progressively. This in turn makes use of the fact that tasklets are
+	 * processed in FIFO order. This means actual RX processing can starve
+	 * out refilling. If there are not enough buffers on the RX ring the
+	 * FW will not report RX until the ring is refilled with enough
+	 * buffers. This automatically balances load with respect to CPU
+	 * power.
+	 *
+	 * This probably comes at a cost of lower maximum throughput but
+	 * improves the average and stability.
+	 */
 	spin_lock_bh(&htt->rx_ring.lock);
-	num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+	num_deficit -= num_to_fill;
 	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
 	if (ret == -ENOMEM) {
 		/*
@@ -190,123 +251,145 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
 		 */
 		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
 			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+	} else if (num_deficit > 0) {
+		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
 	}
 	spin_unlock_bh(&htt->rx_ring.lock);
 }

-static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
+static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
 {
-	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
+	struct ath10k_htt *htt = timer_container_of(htt, t,
+						    rx_ring.refill_retry_timer);
+
 	ath10k_htt_rx_msdu_buff_replenish(htt);
 }

-static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
+int ath10k_htt_rx_ring_refill(struct ath10k *ar)
 {
-	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
-		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
+	struct ath10k_htt *htt = &ar->htt;
+	int ret;
+
+	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+		return 0;
+
+	spin_lock_bh(&htt->rx_ring.lock);
+	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
+					      htt->rx_ring.fill_cnt));
+
+	if (ret)
+		ath10k_htt_rx_ring_free(htt);
+
+	spin_unlock_bh(&htt->rx_ring.lock);
+
+	return ret;
 }

-void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+void ath10k_htt_rx_free(struct ath10k_htt *htt)
 {
-	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+		return;

-	del_timer_sync(&htt->rx_ring.refill_retry_timer);
+	timer_delete_sync(&htt->rx_ring.refill_retry_timer);

-	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
-		struct sk_buff *skb =
-				htt->rx_ring.netbufs_ring[sw_rd_idx];
-		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+	skb_queue_purge(&htt->rx_msdus_q);
+	skb_queue_purge(&htt->rx_in_ord_compl_q);
+	skb_queue_purge(&htt->tx_fetch_ind_q);

-		dma_unmap_single(htt->ar->dev, cb->paddr,
-				 skb->len + skb_tailroom(skb),
-				 DMA_FROM_DEVICE);
-
dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]); - sw_rd_idx++; - sw_rd_idx &= htt->rx_ring.size_mask; - } + spin_lock_bh(&htt->rx_ring.lock); + ath10k_htt_rx_ring_free(htt); + spin_unlock_bh(&htt->rx_ring.lock); dma_free_coherent(htt->ar->dev, - (htt->rx_ring.size * - sizeof(htt->rx_ring.paddrs_ring)), - htt->rx_ring.paddrs_ring, + ath10k_htt_get_rx_ring_size(htt), + ath10k_htt_get_vaddr_ring(htt), htt->rx_ring.base_paddr); + ath10k_htt_config_paddrs_ring(htt, NULL); + dma_free_coherent(htt->ar->dev, sizeof(*htt->rx_ring.alloc_idx.vaddr), htt->rx_ring.alloc_idx.vaddr, htt->rx_ring.alloc_idx.paddr); + htt->rx_ring.alloc_idx.vaddr = NULL; kfree(htt->rx_ring.netbufs_ring); + htt->rx_ring.netbufs_ring = NULL; } static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; int idx; struct sk_buff *msdu; - spin_lock_bh(&htt->rx_ring.lock); + lockdep_assert_held(&htt->rx_ring.lock); - if (ath10k_htt_rx_ring_elems(htt) == 0) - ath10k_warn("htt rx ring is empty!\n"); + if (htt->rx_ring.fill_cnt == 0) { + ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n"); + return NULL; + } idx = htt->rx_ring.sw_rd_idx.msdu_payld; msdu = htt->rx_ring.netbufs_ring[idx]; + htt->rx_ring.netbufs_ring[idx] = NULL; + ath10k_htt_reset_paddrs_ring(htt, idx); idx++; idx &= htt->rx_ring.size_mask; htt->rx_ring.sw_rd_idx.msdu_payld = idx; htt->rx_ring.fill_cnt--; - spin_unlock_bh(&htt->rx_ring.lock); - return msdu; -} - -static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb) -{ - struct sk_buff *next; + dma_unmap_single(htt->ar->dev, + ATH10K_SKB_RXCB(msdu)->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", + msdu->data, msdu->len + skb_tailroom(msdu)); - while (skb) { - next = skb->next; - dev_kfree_skb_any(skb); - skb = next; - } + return msdu; } +/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, - u8 **fw_desc, int *fw_desc_len, - struct sk_buff **head_msdu, - struct sk_buff **tail_msdu) + struct sk_buff_head *amsdu) { + struct ath10k *ar = htt->ar; + struct ath10k_hw_params *hw = &ar->hw_params; int msdu_len, msdu_chaining = 0; struct sk_buff *msdu; struct htt_rx_desc *rx_desc; + struct rx_attention *rx_desc_attention; + struct rx_frag_info_common *rx_desc_frag_info_common; + struct rx_msdu_start_common *rx_desc_msdu_start_common; + struct rx_msdu_end_common *rx_desc_msdu_end_common; - if (ath10k_htt_rx_ring_elems(htt) == 0) - ath10k_warn("htt rx ring is empty!\n"); - - if (htt->rx_confused) { - ath10k_warn("htt is confused. 
refusing rx\n"); - return 0; - } + lockdep_assert_held(&htt->rx_ring.lock); - msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt); - while (msdu) { + for (;;) { int last_msdu, msdu_len_invalid, msdu_chained; - dma_unmap_single(htt->ar->dev, - ATH10K_SKB_CB(msdu)->paddr, - msdu->len + skb_tailroom(msdu), - DMA_FROM_DEVICE); + msdu = ath10k_htt_rx_netbuf_pop(htt); + if (!msdu) { + __skb_queue_purge(amsdu); + return -ENOENT; + } - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ", - msdu->data, msdu->len + skb_tailroom(msdu)); + __skb_queue_tail(amsdu, msdu); - rx_desc = (struct htt_rx_desc *)msdu->data; + rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); + rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc); + rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, + rx_desc); + rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc); + rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc); /* FIXME: we must report msdu payload since this is what caller - * expects now */ - skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload)); - skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload)); + * expects now + */ + skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset); + skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset); /* * Sanity check - confirm the HW is finished filling in the @@ -316,113 +399,54 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, * To prevent the case that we handle a stale Rx descriptor, * just assert for now until we have a way to recover. */ - if (!(__le32_to_cpu(rx_desc->attention.flags) + if (!(__le32_to_cpu(rx_desc_attention->flags) & RX_ATTENTION_FLAGS_MSDU_DONE)) { - ath10k_htt_rx_free_msdu_chain(*head_msdu); - *head_msdu = NULL; - msdu = NULL; - ath10k_err("htt rx stopped. cannot recover\n"); - htt->rx_confused = true; - break; - } - - /* - * Copy the FW rx descriptor for this MSDU from the rx - * indication message into the MSDU's netbuf. HL uses the - * same rx indication message definition as LL, and simply - * appends new info (fields from the HW rx desc, and the - * MSDU payload itself). So, the offset into the rx - * indication message only has to account for the standard - * offset of the per-MSDU FW rx desc info within the - * message, and how many bytes of the per-MSDU FW rx desc - * info have already been consumed. (And the endianness of - * the host, since for a big-endian host, the rx ind - * message contents, including the per-MSDU rx desc bytes, - * were byteswapped during upload.) - */ - if (*fw_desc_len > 0) { - rx_desc->fw_desc.info0 = **fw_desc; - /* - * The target is expected to only provide the basic - * per-MSDU rx descriptors. Just to be sure, verify - * that the target has not attached extension data - * (e.g. LRO flow ID). - */ - - /* or more, if there's extension data */ - (*fw_desc)++; - (*fw_desc_len)--; - } else { - /* - * When an oversized AMSDU happened, FW will lost - * some of MSDU status - in this case, the FW - * descriptors provided will be less than the - * actual MSDUs inside this MPDU. Mark the FW - * descriptors so that it will still deliver to - * upper stack, if no CRC error for this MPDU. - * - * FIX THIS - the FW descriptors are actually for - * MSDUs in the end of this A-MSDU instead of the - * beginning. 
- */ - rx_desc->fw_desc.info0 = 0; + __skb_queue_purge(amsdu); + return -EIO; } - msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) + msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags) & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); - msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0), + msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0), RX_MSDU_START_INFO0_MSDU_LENGTH); - msdu_chained = rx_desc->frag_info.ring2_more_count; + msdu_chained = rx_desc_frag_info_common->ring2_more_count; if (msdu_len_invalid) msdu_len = 0; skb_trim(msdu, 0); - skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE)); + skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw))); msdu_len -= msdu->len; - /* FIXME: Do chained buffers include htt_rx_desc or not? */ + /* Note: Chained buffers do not contain rx descriptor */ while (msdu_chained--) { - struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt); - - dma_unmap_single(htt->ar->dev, - ATH10K_SKB_CB(next)->paddr, - next->len + skb_tailroom(next), - DMA_FROM_DEVICE); - - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ", - next->data, - next->len + skb_tailroom(next)); - - skb_trim(next, 0); - skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE)); - msdu_len -= next->len; + msdu = ath10k_htt_rx_netbuf_pop(htt); + if (!msdu) { + __skb_queue_purge(amsdu); + return -ENOENT; + } - msdu->next = next; - msdu = next; + __skb_queue_tail(amsdu, msdu); + skb_trim(msdu, 0); + skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE)); + msdu_len -= msdu->len; msdu_chaining = 1; } - if (msdu_len > 0) { - /* This may suggest FW bug? */ - ath10k_warn("htt rx msdu len not consumed (%d)\n", - msdu_len); - } - - last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & + last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) & RX_MSDU_END_INFO0_LAST_MSDU; - if (last_msdu) { - msdu->next = NULL; + /* FIXME: why are we skipping the first part of the rx_desc? */ + trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32), + hw->rx_desc_ops->rx_desc_size - sizeof(u32)); + + if (last_msdu) break; - } else { - struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt); - msdu->next = next; - msdu = next; - } } - *tail_msdu = msdu; + + if (skb_queue_empty(amsdu)) + msdu_chaining = -1; /* * Don't refill the ring yet. 
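[Illustrative aside, not part of the patch: after this refactor a caller owns an on-stack sk_buff_head instead of walking skb->next chains, and must hold the rx ring lock (note the lockdep_assert_held() now at the top of the function). A sketch of the resulting calling convention; the caller shown is hypothetical and error handling is trimmed.]

	struct sk_buff_head amsdu;
	int ret;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0)
		return ret;	/* fatal; the queue was already purged */

	/* ret == 0: plain MSDUs, ret == 1: at least one chained buffer */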
@@ -440,622 +464,3649 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, return msdu_chaining; } -int ath10k_htt_rx_attach(struct ath10k_htt *htt) +static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, + u64 paddr) { + struct ath10k *ar = htt->ar; + struct ath10k_skb_rxcb *rxcb; + struct sk_buff *msdu; + + lockdep_assert_held(&htt->rx_ring.lock); + + msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr); + if (!msdu) + return NULL; + + rxcb = ATH10K_SKB_RXCB(msdu); + hash_del(&rxcb->hlist); + htt->rx_ring.fill_cnt--; + + dma_unmap_single(htt->ar->dev, rxcb->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", + msdu->data, msdu->len + skb_tailroom(msdu)); + + return msdu; +} + +static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head, + struct sk_buff *frag_list, + unsigned int frag_len) +{ + skb_shinfo(skb_head)->frag_list = frag_list; + skb_head->data_len = frag_len; + skb_head->len += skb_head->data_len; +} + +static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt, + struct sk_buff *msdu, + struct htt_rx_in_ord_msdu_desc **msdu_desc) +{ + struct ath10k *ar = htt->ar; + struct ath10k_hw_params *hw = &ar->hw_params; + u32 paddr; + struct sk_buff *frag_buf; + struct sk_buff *prev_frag_buf; + u8 last_frag; + struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc; + struct htt_rx_desc *rxd; + int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); + + rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); + trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); + + skb_put(msdu, hw->rx_desc_ops->rx_desc_size); + skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); + skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw))); + amsdu_len -= msdu->len; + + last_frag = ind_desc->reserved; + if (last_frag) { + if (amsdu_len) { + ath10k_warn(ar, "invalid amsdu len %u, left %d", + __le16_to_cpu(ind_desc->msdu_len), + amsdu_len); + } + return 0; + } + + ind_desc++; + paddr = __le32_to_cpu(ind_desc->msdu_paddr); + frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!frag_buf) { + ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr); + return -ENOENT; + } + + skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); + ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); + + amsdu_len -= frag_buf->len; + prev_frag_buf = frag_buf; + last_frag = ind_desc->reserved; + while (!last_frag) { + ind_desc++; + paddr = __le32_to_cpu(ind_desc->msdu_paddr); + frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!frag_buf) { + ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x", + paddr); + prev_frag_buf->next = NULL; + return -ENOENT; + } + + skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); + last_frag = ind_desc->reserved; + amsdu_len -= frag_buf->len; + + prev_frag_buf->next = frag_buf; + prev_frag_buf = frag_buf; + } + + if (amsdu_len) { + ath10k_warn(ar, "invalid amsdu len %u, left %d", + __le16_to_cpu(ind_desc->msdu_len), amsdu_len); + } + + *msdu_desc = ind_desc; + + prev_frag_buf->next = NULL; + return 0; +} + +static int +ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt, + struct sk_buff *msdu, + struct htt_rx_in_ord_msdu_desc_ext **msdu_desc) +{ + struct ath10k *ar = htt->ar; + struct ath10k_hw_params *hw = &ar->hw_params; + u64 paddr; + struct sk_buff *frag_buf; + struct sk_buff *prev_frag_buf; + u8 last_frag; + struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc; + struct htt_rx_desc *rxd; + int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); + + rxd = 
HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); + trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); + + skb_put(msdu, hw->rx_desc_ops->rx_desc_size); + skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); + skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw))); + amsdu_len -= msdu->len; + + last_frag = ind_desc->reserved; + if (last_frag) { + if (amsdu_len) { + ath10k_warn(ar, "invalid amsdu len %u, left %d", + __le16_to_cpu(ind_desc->msdu_len), + amsdu_len); + } + return 0; + } + + ind_desc++; + paddr = __le64_to_cpu(ind_desc->msdu_paddr); + frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!frag_buf) { + ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr); + return -ENOENT; + } + + skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); + ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); + + amsdu_len -= frag_buf->len; + prev_frag_buf = frag_buf; + last_frag = ind_desc->reserved; + while (!last_frag) { + ind_desc++; + paddr = __le64_to_cpu(ind_desc->msdu_paddr); + frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!frag_buf) { + ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx", + paddr); + prev_frag_buf->next = NULL; + return -ENOENT; + } + + skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); + last_frag = ind_desc->reserved; + amsdu_len -= frag_buf->len; + + prev_frag_buf->next = frag_buf; + prev_frag_buf = frag_buf; + } + + if (amsdu_len) { + ath10k_warn(ar, "invalid amsdu len %u, left %d", + __le16_to_cpu(ind_desc->msdu_len), amsdu_len); + } + + *msdu_desc = ind_desc; + + prev_frag_buf->next = NULL; + return 0; +} + +static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt, + struct htt_rx_in_ord_ind *ev, + struct sk_buff_head *list) +{ + struct ath10k *ar = htt->ar; + struct ath10k_hw_params *hw = &ar->hw_params; + struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32; + struct htt_rx_desc *rxd; + struct rx_attention *rxd_attention; + struct sk_buff *msdu; + int msdu_count, ret; + bool is_offload; + u32 paddr; + + lockdep_assert_held(&htt->rx_ring.lock); + + msdu_count = __le16_to_cpu(ev->msdu_count); + is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); + + while (msdu_count--) { + paddr = __le32_to_cpu(msdu_desc->msdu_paddr); + + msdu = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!msdu) { + __skb_queue_purge(list); + return -ENOENT; + } + + if (!is_offload && ar->monitor_arvif) { + ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu, + &msdu_desc); + if (ret) { + __skb_queue_purge(list); + return ret; + } + __skb_queue_tail(list, msdu); + msdu_desc++; + continue; + } + + __skb_queue_tail(list, msdu); + + if (!is_offload) { + rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); + rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); + + trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); + + skb_put(msdu, hw->rx_desc_ops->rx_desc_size); + skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); + skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); + + if (!(__le32_to_cpu(rxd_attention->flags) & + RX_ATTENTION_FLAGS_MSDU_DONE)) { + ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); + return -EIO; + } + } + + msdu_desc++; + } + + return 0; +} + +static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt, + struct htt_rx_in_ord_ind *ev, + struct sk_buff_head *list) +{ + struct ath10k *ar = htt->ar; + struct ath10k_hw_params *hw = &ar->hw_params; + struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64; + struct htt_rx_desc *rxd; + struct rx_attention *rxd_attention; + struct sk_buff *msdu; + int 
msdu_count, ret; + bool is_offload; + u64 paddr; + + lockdep_assert_held(&htt->rx_ring.lock); + + msdu_count = __le16_to_cpu(ev->msdu_count); + is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); + + while (msdu_count--) { + paddr = __le64_to_cpu(msdu_desc->msdu_paddr); + msdu = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!msdu) { + __skb_queue_purge(list); + return -ENOENT; + } + + if (!is_offload && ar->monitor_arvif) { + ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu, + &msdu_desc); + if (ret) { + __skb_queue_purge(list); + return ret; + } + __skb_queue_tail(list, msdu); + msdu_desc++; + continue; + } + + __skb_queue_tail(list, msdu); + + if (!is_offload) { + rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); + rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); + + trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); + + skb_put(msdu, hw->rx_desc_ops->rx_desc_size); + skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); + skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); + + if (!(__le32_to_cpu(rxd_attention->flags) & + RX_ATTENTION_FLAGS_MSDU_DONE)) { + ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); + return -EIO; + } + } + + msdu_desc++; + } + + return 0; +} + +int ath10k_htt_rx_alloc(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; dma_addr_t paddr; - void *vaddr; + void *vaddr, *vaddr_ring; + size_t size; struct timer_list *timer = &htt->rx_ring.refill_retry_timer; - htt->rx_ring.size = ath10k_htt_rx_ring_size(htt); - if (!is_power_of_2(htt->rx_ring.size)) { - ath10k_warn("htt rx ring size is not power of 2\n"); - return -EINVAL; - } + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) + return 0; - htt->rx_ring.size_mask = htt->rx_ring.size - 1; + htt->rx_confused = false; - /* - * Set the initial value for the level to which the rx ring - * should be filled, based on the max throughput and the - * worst likely latency for the host to fill the rx ring - * with new buffers. In theory, this fill level can be - * dynamically adjusted from the initial value set here, to - * reflect the actual host latency rather than a - * conservative assumption about the host latency. + /* XXX: The fill level could be changed during runtime in response to + * the host processing latency. Is this really worth it? 
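[Illustrative aside, not part of the patch: the power-of-2 check a few lines below exists because ring indices wrap with a mask rather than a modulo, as the fill path shown earlier does.]

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;	/* 2048 -> 0x7ff   */
	idx = (idx + 1) & htt->rx_ring.size_mask;	/* 2047 wraps to 0 */

[With a non-power-of-2 size, size - 1 is not an all-ones mask, so the AND would alias slots instead of wrapping cleanly; hence the -EINVAL below.]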
*/ - htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt); + htt->rx_ring.size = HTT_RX_RING_SIZE; + htt->rx_ring.size_mask = htt->rx_ring.size - 1; + htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level; + + if (!is_power_of_2(htt->rx_ring.size)) { + ath10k_warn(ar, "htt rx ring size is not power of 2\n"); + return -EINVAL; + } htt->rx_ring.netbufs_ring = - kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *), + kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *), GFP_KERNEL); if (!htt->rx_ring.netbufs_ring) goto err_netbuf; - vaddr = dma_alloc_coherent(htt->ar->dev, - (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)), - &paddr, GFP_DMA); - if (!vaddr) + size = ath10k_htt_get_rx_ring_size(htt); + + vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); + if (!vaddr_ring) goto err_dma_ring; - htt->rx_ring.paddrs_ring = vaddr; + ath10k_htt_config_paddrs_ring(htt, vaddr_ring); htt->rx_ring.base_paddr = paddr; vaddr = dma_alloc_coherent(htt->ar->dev, sizeof(*htt->rx_ring.alloc_idx.vaddr), - &paddr, GFP_DMA); + &paddr, GFP_KERNEL); if (!vaddr) goto err_dma_idx; htt->rx_ring.alloc_idx.vaddr = vaddr; htt->rx_ring.alloc_idx.paddr = paddr; - htt->rx_ring.sw_rd_idx.msdu_payld = 0; + htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask; *htt->rx_ring.alloc_idx.vaddr = 0; /* Initialize the Rx refill retry timer */ - setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt); + timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0); spin_lock_init(&htt->rx_ring.lock); htt->rx_ring.fill_cnt = 0; - if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level)) - goto err_fill_ring; + htt->rx_ring.sw_rd_idx.msdu_payld = 0; + hash_init(htt->rx_ring.skb_table); - ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n", + skb_queue_head_init(&htt->rx_msdus_q); + skb_queue_head_init(&htt->rx_in_ord_compl_q); + skb_queue_head_init(&htt->tx_fetch_ind_q); + atomic_set(&htt->num_mpdus_ready, 0); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", htt->rx_ring.size, htt->rx_ring.fill_level); return 0; -err_fill_ring: - ath10k_htt_rx_ring_free(htt); - dma_free_coherent(htt->ar->dev, - sizeof(*htt->rx_ring.alloc_idx.vaddr), - htt->rx_ring.alloc_idx.vaddr, - htt->rx_ring.alloc_idx.paddr); err_dma_idx: dma_free_coherent(htt->ar->dev, - (htt->rx_ring.size * - sizeof(htt->rx_ring.paddrs_ring)), - htt->rx_ring.paddrs_ring, + ath10k_htt_get_rx_ring_size(htt), + vaddr_ring, htt->rx_ring.base_paddr); + ath10k_htt_config_paddrs_ring(htt, NULL); err_dma_ring: kfree(htt->rx_ring.netbufs_ring); + htt->rx_ring.netbufs_ring = NULL; err_netbuf: return -ENOMEM; } -static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type) +static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, + enum htt_rx_mpdu_encrypt_type type) { switch (type) { + case HTT_RX_MPDU_ENCRYPT_NONE: + return 0; case HTT_RX_MPDU_ENCRYPT_WEP40: case HTT_RX_MPDU_ENCRYPT_WEP104: - return 4; + return IEEE80211_WEP_IV_LEN; case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: - case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: - case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */ + return IEEE80211_TKIP_IV_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: - return 8; - case HTT_RX_MPDU_ENCRYPT_NONE: - return 0; + return IEEE80211_CCMP_HDR_LEN; + case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: + return IEEE80211_CCMP_256_HDR_LEN; + case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: + case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: + return IEEE80211_GCMP_HDR_LEN; + 
case HTT_RX_MPDU_ENCRYPT_WEP128: + case HTT_RX_MPDU_ENCRYPT_WAPI: + break; } - ath10k_warn("unknown encryption type %d\n", type); + ath10k_warn(ar, "unsupported encryption type %d\n", type); return 0; } -static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type) +#define MICHAEL_MIC_LEN 8 + +static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar, + enum htt_rx_mpdu_encrypt_type type) { switch (type) { case HTT_RX_MPDU_ENCRYPT_NONE: case HTT_RX_MPDU_ENCRYPT_WEP40: case HTT_RX_MPDU_ENCRYPT_WEP104: + case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: + case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: + return 0; + case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: + return IEEE80211_CCMP_MIC_LEN; + case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: + return IEEE80211_CCMP_256_MIC_LEN; + case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: + case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: + return IEEE80211_GCMP_MIC_LEN; case HTT_RX_MPDU_ENCRYPT_WEP128: case HTT_RX_MPDU_ENCRYPT_WAPI: + break; + } + + ath10k_warn(ar, "unsupported encryption type %d\n", type); + return 0; +} + +static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar, + enum htt_rx_mpdu_encrypt_type type) +{ + switch (type) { + case HTT_RX_MPDU_ENCRYPT_NONE: + case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: + case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: + case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: + case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: return 0; + case HTT_RX_MPDU_ENCRYPT_WEP40: + case HTT_RX_MPDU_ENCRYPT_WEP104: + return IEEE80211_WEP_ICV_LEN; case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: - return 4; - case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: - return 8; + return IEEE80211_TKIP_ICV_LEN; + case HTT_RX_MPDU_ENCRYPT_WEP128: + case HTT_RX_MPDU_ENCRYPT_WAPI: + break; } - ath10k_warn("unknown encryption type %d\n", type); + ath10k_warn(ar, "unsupported encryption type %d\n", type); return 0; } -/* Applies for first msdu in chain, before altering it. 
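[Illustrative aside, not part of the patch: the three helpers above split per-frame crypto overhead into header (IV), MIC and ICV portions; summed, they give the byte count the undecap paths later strip around the payload. The combined helper here is hypothetical.]

	static size_t crypto_overhead(struct ath10k *ar,
				      enum htt_rx_mpdu_encrypt_type type)
	{
		return ath10k_htt_rx_crypto_param_len(ar, type) +
		       ath10k_htt_rx_crypto_mic_len(ar, type) +
		       ath10k_htt_rx_crypto_icv_len(ar, type);
	}

[For CCMP-128 this is 8 (IEEE80211_CCMP_HDR_LEN) + 8 (IEEE80211_CCMP_MIC_LEN) + 0 = 16 bytes; for TKIP it is 8 + 0 + 4 (IEEE80211_TKIP_ICV_LEN) = 12, with the 8-byte Michael MIC accounted separately via MICHAEL_MIC_LEN.]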
*/ -static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb) +struct amsdu_subframe_hdr { + u8 dst[ETH_ALEN]; + u8 src[ETH_ALEN]; + __be16 len; +} __packed; + +#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63) + +static inline u8 ath10k_bw_to_mac80211_bw(u8 bw) +{ + u8 ret = 0; + + switch (bw) { + case 0: + ret = RATE_INFO_BW_20; + break; + case 1: + ret = RATE_INFO_BW_40; + break; + case 2: + ret = RATE_INFO_BW_80; + break; + case 3: + ret = RATE_INFO_BW_160; + break; + } + + return ret; +} + +static void ath10k_htt_rx_h_rates(struct ath10k *ar, + struct ieee80211_rx_status *status, + struct htt_rx_desc *rxd) { + struct ath10k_hw_params *hw = &ar->hw_params; + struct rx_attention *rxd_attention; + struct rx_mpdu_start *rxd_mpdu_start; + struct rx_mpdu_end *rxd_mpdu_end; + struct rx_msdu_start_common *rxd_msdu_start_common; + struct rx_msdu_end_common *rxd_msdu_end_common; + struct rx_ppdu_start *rxd_ppdu_start; + struct ieee80211_supported_band *sband; + u8 cck, rate, bw, sgi, mcs, nss; + u8 *rxd_msdu_payload; + u8 preamble = 0; + u8 group_id; + u32 info1, info2, info3; + u32 stbc, nsts_su; + + rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); + rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); + rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd); + rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); + rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); + rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd); + rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd); + + info1 = __le32_to_cpu(rxd_ppdu_start->info1); + info2 = __le32_to_cpu(rxd_ppdu_start->info2); + info3 = __le32_to_cpu(rxd_ppdu_start->info3); + + preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE); + + switch (preamble) { + case HTT_RX_LEGACY: + /* To get legacy rate index band is required. Since band can't + * be undefined check if freq is non-zero. + */ + if (!status->freq) + return; + + cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT; + rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE); + rate &= ~RX_PPDU_START_RATE_FLAG; + + sband = &ar->mac.sbands[status->band]; + status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck); + break; + case HTT_RX_HT: + case HTT_RX_HT_WITH_TXBF: + /* HT-SIG - Table 20-11 in info2 and info3 */ + mcs = info2 & 0x1F; + nss = mcs >> 3; + bw = (info2 >> 7) & 1; + sgi = (info3 >> 7) & 1; + + status->rate_idx = mcs; + status->encoding = RX_ENC_HT; + if (sgi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + if (bw) + status->bw = RATE_INFO_BW_40; + break; + case HTT_RX_VHT: + case HTT_RX_VHT_WITH_TXBF: + /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 + * TODO check this + */ + bw = info2 & 3; + sgi = info3 & 1; + stbc = (info2 >> 3) & 1; + group_id = (info2 >> 4) & 0x3F; + + if (GROUP_ID_IS_SU_MIMO(group_id)) { + mcs = (info3 >> 4) & 0x0F; + nsts_su = ((info2 >> 10) & 0x07); + if (stbc) + nss = (nsts_su >> 2) + 1; + else + nss = (nsts_su + 1); + } else { + /* Hardware doesn't decode VHT-SIG-B into Rx descriptor + * so it's impossible to decode MCS. Also since + * firmware consumes Group Id Management frames host + * has no knowledge regarding group/user position + * mapping so it's impossible to pick the correct Nsts + * from VHT-SIG-A1. + * + * Bandwidth and SGI are valid so report the rateinfo + * on best-effort basis. 
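[Illustrative aside, not part of the patch: a condensed restatement of the SU branch above, with the VHT-SIG-A bit positions this code assumes. Per 802.11ac, group IDs 0 and 63 denote single-user transmissions, which is all GROUP_ID_IS_SU_MIMO() checks.]

	group_id = (info2 >> 4) & 0x3F;
	if (GROUP_ID_IS_SU_MIMO(group_id)) {
		mcs     = (info3 >> 4) & 0x0F;
		nsts_su = (info2 >> 10) & 0x07;
		nss     = stbc ? (nsts_su >> 2) + 1 : nsts_su + 1;
	}

[For example, nsts_su == 2 without STBC yields nss = 3. In the MU case the hardware never decodes VHT-SIG-B, so the code falls back to mcs = 0, nss = 1 just below, exactly as the comment explains.]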
+ */ + mcs = 0; + nss = 1; + } + + if (mcs > 0x09) { + ath10k_warn(ar, "invalid MCS received %u\n", mcs); + ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n", + __le32_to_cpu(rxd_attention->flags), + __le32_to_cpu(rxd_mpdu_start->info0), + __le32_to_cpu(rxd_mpdu_start->info1), + __le32_to_cpu(rxd_msdu_start_common->info0), + __le32_to_cpu(rxd_msdu_start_common->info1), + rxd_ppdu_start->info0, + __le32_to_cpu(rxd_ppdu_start->info1), + __le32_to_cpu(rxd_ppdu_start->info2), + __le32_to_cpu(rxd_ppdu_start->info3), + __le32_to_cpu(rxd_ppdu_start->info4)); + + ath10k_warn(ar, "msdu end %08x mpdu end %08x\n", + __le32_to_cpu(rxd_msdu_end_common->info0), + __le32_to_cpu(rxd_mpdu_end->info0)); + + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, + "rx desc msdu payload: ", + rxd_msdu_payload, 50); + } + + status->rate_idx = mcs; + status->nss = nss; + + if (sgi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + status->bw = ath10k_bw_to_mac80211_bw(bw); + status->encoding = RX_ENC_VHT; + break; + default: + break; + } +} + +static struct ieee80211_channel * +ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd) +{ + struct ath10k_hw_params *hw = &ar->hw_params; + struct rx_attention *rxd_attention; + struct rx_msdu_end_common *rxd_msdu_end_common; + struct rx_mpdu_start *rxd_mpdu_start; + struct ath10k_peer *peer; + struct ath10k_vif *arvif; + struct cfg80211_chan_def def; + u16 peer_id; + + lockdep_assert_held(&ar->data_lock); + + if (!rxd) + return NULL; + + rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); + rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); + rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); + + if (rxd_attention->flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID)) + return NULL; + + if (!(rxd_msdu_end_common->info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU))) + return NULL; + + peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0), + RX_MPDU_START_INFO0_PEER_IDX); + + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer) + return NULL; + + arvif = ath10k_get_arvif(ar, peer->vdev_id); + if (WARN_ON_ONCE(!arvif)) + return NULL; + + if (ath10k_mac_vif_chan(arvif->vif, &def)) + return NULL; + + return def.chan; +} + +static struct ieee80211_channel * +ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id) +{ + struct ath10k_vif *arvif; + struct cfg80211_chan_def def; + + lockdep_assert_held(&ar->data_lock); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_id == vdev_id && + ath10k_mac_vif_chan(arvif->vif, &def) == 0) + return def.chan; + } + + return NULL; +} + +static void +ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *conf, + void *data) +{ + struct cfg80211_chan_def *def = data; + + *def = conf->def; +} + +static struct ieee80211_channel * +ath10k_htt_rx_h_any_channel(struct ath10k *ar) +{ + struct cfg80211_chan_def def = {}; + + ieee80211_iter_chan_contexts_atomic(ar->hw, + ath10k_htt_rx_h_any_chan_iter, + &def); + + return def.chan; +} + +static bool ath10k_htt_rx_h_channel(struct ath10k *ar, + struct ieee80211_rx_status *status, + struct htt_rx_desc *rxd, + u32 vdev_id) +{ + struct ieee80211_channel *ch; + + spin_lock_bh(&ar->data_lock); + ch = ar->scan_channel; + if (!ch) + ch = ar->rx_channel; + if (!ch) + ch = ath10k_htt_rx_h_peer_channel(ar, rxd); + if (!ch) + ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id); + if (!ch) + ch = ath10k_htt_rx_h_any_channel(ar); + if (!ch) + ch = 
ar->tgt_oper_chan; + spin_unlock_bh(&ar->data_lock); + + if (!ch) + return false; + + status->band = ch->band; + status->freq = ch->center_freq; + + return true; +} + +static void ath10k_htt_rx_h_signal(struct ath10k *ar, + struct ieee80211_rx_status *status, + struct htt_rx_desc *rxd) +{ + struct ath10k_hw_params *hw = &ar->hw_params; + struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd); + int i; + + for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) { + status->chains &= ~BIT(i); + + if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) { + status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + + rxd_ppdu_start->rssi_chains[i].pri20_mhz; + + status->chains |= BIT(i); + } + } + + /* FIXME: Get real NF */ + status->signal = ATH10K_DEFAULT_NOISE_FLOOR + + rxd_ppdu_start->rssi_comb; + status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; +} + +static void ath10k_htt_rx_h_mactime(struct ath10k *ar, + struct ieee80211_rx_status *status, + struct htt_rx_desc *rxd) +{ + struct ath10k_hw_params *hw = &ar->hw_params; + struct rx_ppdu_end_common *rxd_ppdu_end_common; + + rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd); + + /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This + * means all prior MSDUs in a PPDU are reported to mac80211 without the + * TSF. Is it worth holding frames until end of PPDU is known? + * + * FIXME: Can we get/compute 64bit TSF? + */ + status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp); + status->flag |= RX_FLAG_MACTIME_END; +} + +static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *status, + u32 vdev_id) +{ + struct sk_buff *first; + struct ath10k_hw_params *hw = &ar->hw_params; struct htt_rx_desc *rxd; - enum rx_msdu_decap_format fmt; + struct rx_attention *rxd_attention; + bool is_first_ppdu; + bool is_last_ppdu; + + if (skb_queue_empty(amsdu)) + return; + + first = skb_peek(amsdu); + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)first->data - hw->rx_desc_ops->rx_desc_size); - rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); + rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); - if (fmt == RX_MSDU_DECAP_RAW) - return (void *)skb->data; + is_first_ppdu = !!(rxd_attention->flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU)); + is_last_ppdu = !!(rxd_attention->flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU)); + + if (is_first_ppdu) { + /* New PPDU starts so clear out the old per-PPDU status. 
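[Illustrative aside, not part of the patch: the channel lookup assembled a few lines up is a strict precedence chain, each step being weaker evidence of where the frame was actually received. Condensed here with the GNU ?: shorthand and the helper arguments abbreviated.]

	ch = ar->scan_channel;				/* 1. scan in progress   */
	ch = ch ?: ar->rx_channel;			/* 2. last known rx chan */
	ch = ch ?: ath10k_htt_rx_h_peer_channel(...);	/* 3. peer's vif chanctx */
	ch = ch ?: ath10k_htt_rx_h_vdev_channel(...);	/* 4. vdev's chanctx     */
	ch = ch ?: ath10k_htt_rx_h_any_channel(ar);	/* 5. any live chanctx   */
	ch = ch ?: ar->tgt_oper_chan;			/* 6. last resort        */

[If even the last resort is NULL, ath10k_htt_rx_h_channel() returns false and status->freq keeps the zero assigned during the per-PPDU reset below.]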
*/ + status->freq = 0; + status->rate_idx = 0; + status->nss = 0; + status->encoding = RX_ENC_LEGACY; + status->bw = RATE_INFO_BW_20; + + status->flag &= ~RX_FLAG_MACTIME; + status->flag |= RX_FLAG_NO_SIGNAL_VAL; + + status->flag &= ~(RX_FLAG_AMPDU_IS_LAST); + status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; + status->ampdu_reference = ar->ampdu_reference; + + ath10k_htt_rx_h_signal(ar, status, rxd); + ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id); + ath10k_htt_rx_h_rates(ar, status, rxd); + } + + if (is_last_ppdu) { + ath10k_htt_rx_h_mactime(ar, status, rxd); + + /* set ampdu last segment flag */ + status->flag |= RX_FLAG_AMPDU_IS_LAST; + ar->ampdu_reference++; + } +} + +static const char * const tid_to_ac[] = { + "BE", + "BK", + "BK", + "BE", + "VI", + "VI", + "VO", + "VO", +}; + +static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size) +{ + u8 *qc; + int tid; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return ""; + + qc = ieee80211_get_qos_ctl(hdr); + tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + if (tid < 8) + snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]); else - return (void *)skb->data - RX_HTT_HDR_STATUS_LEN; + snprintf(out, size, "tid %d", tid); + + return out; } -/* This function only applies for first msdu in an msdu chain */ -static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr) +static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar, + struct ieee80211_rx_status *rx_status, + struct sk_buff *skb) { - if (ieee80211_is_data_qos(hdr->frame_control)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - if (qc[0] & 0x80) - return true; + struct ieee80211_rx_status *status; + + status = IEEE80211_SKB_RXCB(skb); + *status = *rx_status; + + skb_queue_tail(&ar->htt.rx_msdus_q, skb); +} + +static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb) +{ + struct ieee80211_rx_status *status; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + char tid[32]; + + status = IEEE80211_SKB_RXCB(skb); + + if (!(ar->filter_flags & FIF_FCSFAIL) && + status->flag & RX_FLAG_FAILED_FCS_CRC) { + ar->stats.rx_crc_err_drop++; + dev_kfree_skb_any(skb); + return; } - return false; + + ath10k_dbg(ar, ATH10K_DBG_DATA, + "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", + skb, + skb->len, + ieee80211_get_SA(hdr), + ath10k_get_tid(hdr, tid, sizeof(tid)), + is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? + "mcast" : "ucast", + IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)), + (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", + (status->encoding == RX_ENC_HT) ? "ht" : "", + (status->encoding == RX_ENC_VHT) ? "vht" : "", + (status->bw == RATE_INFO_BW_40) ? "40" : "", + (status->bw == RATE_INFO_BW_80) ? "80" : "", + (status->bw == RATE_INFO_BW_160) ? "160" : "", + status->enc_flags & RX_ENC_FLAG_SHORT_GI ? 
"sgi " : "", + status->rate_idx, + status->nss, + status->freq, + status->band, status->flag, + !!(status->flag & RX_FLAG_FAILED_FCS_CRC), + !!(status->flag & RX_FLAG_MMIC_ERROR), + !!(status->flag & RX_FLAG_AMSDU_MORE)); + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", + skb->data, skb->len); + trace_ath10k_rx_hdr(ar, skb->data, skb->len); + trace_ath10k_rx_payload(ar, skb->data, skb->len); + + ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); } -static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt, - struct htt_rx_info *info) +static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, + struct ieee80211_hdr *hdr) +{ + int len = ieee80211_hdrlen(hdr->frame_control); + + if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, + ar->running_fw->fw_file.fw_features)) + len = round_up(len, 4); + + return len; +} + +static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + enum htt_rx_mpdu_encrypt_type enctype, + bool is_decrypted, + const u8 first_hdr[64]) { - struct htt_rx_desc *rxd; - struct sk_buff *amsdu; - struct sk_buff *first; struct ieee80211_hdr *hdr; - struct sk_buff *skb = info->skb; - enum rx_msdu_decap_format fmt; - enum htt_rx_mpdu_encrypt_type enctype; - unsigned int hdr_len; - int crypto_len; - - rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); - - /* FIXME: No idea what assumptions are safe here. Need logs */ - if ((fmt == RX_MSDU_DECAP_RAW && skb->next) || - (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) { - ath10k_htt_rx_free_msdu_chain(skb->next); - skb->next = NULL; - return -ENOTSUPP; - } - - /* A-MSDU max is a little less than 8K */ - amsdu = dev_alloc_skb(8*1024); - if (!amsdu) { - ath10k_warn("A-MSDU allocation failed\n"); - ath10k_htt_rx_free_msdu_chain(skb->next); - skb->next = NULL; - return -ENOMEM; - } - - if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) { - int hdrlen; - - hdr = (void *)rxd->rx_hdr_status; - hdrlen = ieee80211_hdrlen(hdr->frame_control); - memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen); - } - - first = skb; - while (skb) { - void *decap_hdr; - int decap_len = 0; - - rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - decap_hdr = (void *)rxd->rx_hdr_status; - - if (skb == first) { - /* We receive linked A-MSDU subframe skbuffs. The - * first one contains the original 802.11 header (and - * possible crypto param) in the RX descriptor. The - * A-MSDU subframe header follows that. Each part is - * aligned to 4 byte boundary. 
*/
-
- hdr = (void *)amsdu->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
-
- decap_hdr += roundup(hdr_len, 4);
- decap_hdr += roundup(crypto_len, 4);
- }
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ size_t hdr_len;
+ size_t crypto_len;
+ bool is_first;
+ bool is_last;
+ bool msdu_limit_err;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+ u8 *qos;
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ is_first = !!(rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Delivered decapped frame:
+ * [802.11 header]
+ * [crypto param] <-- can be trimmed if !fcs_err &&
+ * !decrypt_err && !peer_idx_invalid
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ * [payload]
+ * [FCS] <-- at end, needs to be trimmed
+ */
- if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
- /* Ethernet2 decap inserts ethernet header in place of
- * A-MSDU subframe header. */
- skb_pull(skb, 6 + 6 + 2);
+ /* Some hardware (QCA99x0 variants) limits the number of MSDUs it will
+ * deaggregate from an A-MSDU, so that unwanted MSDU-deaggregation is
+ * avoided for error packets. If the limit is exceeded, hw sends all
+ * remaining MSDUs as a single last MSDU with the msdu limit error set.
+ */
+ msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
- /* A-MSDU subframe header length */
- decap_len += 6 + 6 + 2;
+ /* If the MSDU limit error is set, don't warn: a partial raw MSDU
+ * without the first MSDU is expected in that case and is handled
+ * later here.
+ */
+ /* This probably shouldn't happen but warn just in case */
+ if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
+ return;
+
+ /* This probably shouldn't happen but warn just in case */
+ if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
+ return;
- /* Ethernet2 decap also strips the LLC/SNAP so we need
- * to re-insert it. The LLC/SNAP follows A-MSDU
- * subframe header. */
- /* FIXME: Not all LLCs are 8 bytes long */
- decap_len += 8;
+ skb_trim(msdu, msdu->len - FCS_LEN);
- memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+ /* Push original 802.11 header */
+ if (unlikely(msdu_limit_err)) {
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
 }
- if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
- /* Native Wifi decap inserts regular 802.11 header
- * in place of A-MSDU subframe header. */
- hdr = (struct ieee80211_hdr *)skb->data;
- skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+ if (crypto_len)
+ memcpy(skb_push(msdu, crypto_len),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ crypto_len);
- /* A-MSDU subframe header length */
- decap_len += 6 + 6 + 2;
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+ }
- memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
- }
+ /* In most cases this will be true for sniffed frames. It makes sense
+ * to deliver them as-is without stripping the crypto param. This is
+ * necessary for software based decryption.
+ *
+ * If there's no error then the frame is decrypted.
At least that is + * the case for frames that come in via fragmented rx indication. + */ + if (!is_decrypted) + return; - if (fmt == RX_MSDU_DECAP_RAW) - skb_trim(skb, skb->len - 4); /* remove FCS */ + /* The payload is decrypted so strip crypto params. Start from tail + * since hdr is used to compute some stuff. + */ - memcpy(skb_put(amsdu, skb->len), skb->data, skb->len); + hdr = (void *)msdu->data; + + /* Tail */ + if (status->flag & RX_FLAG_IV_STRIPPED) { + skb_trim(msdu, msdu->len - + ath10k_htt_rx_crypto_mic_len(ar, enctype)); + + skb_trim(msdu, msdu->len - + ath10k_htt_rx_crypto_icv_len(ar, enctype)); + } else { + /* MIC */ + if (status->flag & RX_FLAG_MIC_STRIPPED) + skb_trim(msdu, msdu->len - + ath10k_htt_rx_crypto_mic_len(ar, enctype)); + + /* ICV */ + if (status->flag & RX_FLAG_ICV_STRIPPED) + skb_trim(msdu, msdu->len - + ath10k_htt_rx_crypto_icv_len(ar, enctype)); + } - /* A-MSDU subframes are padded to 4bytes - * but relative to first subframe, not the whole MPDU */ - if (skb->next && ((decap_len + skb->len) & 3)) { - int padlen = 4 - ((decap_len + skb->len) & 3); - memset(skb_put(amsdu, padlen), 0, padlen); - } + /* MMIC */ + if ((status->flag & RX_FLAG_MMIC_STRIPPED) && + !ieee80211_has_morefrags(hdr->frame_control) && + enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) + skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN); + + /* Head */ + if (status->flag & RX_FLAG_IV_STRIPPED) { + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); - skb = skb->next; + memmove((void *)msdu->data + crypto_len, + (void *)msdu->data, hdr_len); + skb_pull(msdu, crypto_len); } +} + +static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + const u8 first_hdr[64], + enum htt_rx_mpdu_encrypt_type enctype) +{ + struct ath10k_hw_params *hw = &ar->hw_params; + struct ieee80211_hdr *hdr; + struct htt_rx_desc *rxd; + size_t hdr_len; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + int l3_pad_bytes; + int bytes_aligned = ar->hw_params.decap_align_bytes; + + /* Delivered decapped frame: + * [nwifi 802.11 header] <-- replaced with 802.11 hdr + * [rfc1042/llc] + * + * Note: The nwifi header doesn't have QoS Control and is + * (always?) a 3addr frame. + * + * Note2: There's no A-MSDU subframe header. Even if it's part + * of an A-MSDU. 
+ */ - info->skb = amsdu; - info->encrypt_type = enctype; + /* pull decapped header and copy SA & DA */ + rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data - + hw->rx_desc_ops->rx_desc_size); - ath10k_htt_rx_free_msdu_chain(first); + l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); + skb_put(msdu, l3_pad_bytes); - return 0; + hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes); + + hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr); + ether_addr_copy(da, ieee80211_get_DA(hdr)); + ether_addr_copy(sa, ieee80211_get_SA(hdr)); + skb_pull(msdu, hdr_len); + + /* push original 802.11 header */ + hdr = (struct ieee80211_hdr *)first_hdr; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + if (!(status->flag & RX_FLAG_IV_STRIPPED)) { + memcpy(skb_push(msdu, + ath10k_htt_rx_crypto_param_len(ar, enctype)), + (void *)hdr + round_up(hdr_len, bytes_aligned), + ath10k_htt_rx_crypto_param_len(ar, enctype)); + } + + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); + + /* original 802.11 header has a different DA and in + * case of 4addr it may also have different SA + */ + hdr = (struct ieee80211_hdr *)msdu->data; + ether_addr_copy(ieee80211_get_DA(hdr), da); + ether_addr_copy(ieee80211_get_SA(hdr), sa); } -static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info) +static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, + struct sk_buff *msdu, + enum htt_rx_mpdu_encrypt_type enctype) { - struct sk_buff *skb = info->skb; + struct ieee80211_hdr *hdr; + struct ath10k_hw_params *hw = &ar->hw_params; struct htt_rx_desc *rxd; + struct rx_msdu_end_common *rxd_msdu_end_common; + u8 *rxd_rx_hdr_status; + size_t hdr_len, crypto_len; + void *rfc1042; + bool is_first, is_last, is_amsdu; + int bytes_aligned = ar->hw_params.decap_align_bytes; + + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); + + rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); + rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); + hdr = (void *)rxd_rx_hdr_status; + + is_first = !!(rxd_msdu_end_common->info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); + is_last = !!(rxd_msdu_end_common->info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); + is_amsdu = !(is_first && is_last); + + rfc1042 = hdr; + + if (is_first) { + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); + + rfc1042 += round_up(hdr_len, bytes_aligned) + + round_up(crypto_len, bytes_aligned); + } + + if (is_amsdu) + rfc1042 += sizeof(struct amsdu_subframe_hdr); + + return rfc1042; +} + +static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + const u8 first_hdr[64], + enum htt_rx_mpdu_encrypt_type enctype) +{ + struct ath10k_hw_params *hw = &ar->hw_params; struct ieee80211_hdr *hdr; - enum rx_msdu_decap_format fmt; - enum htt_rx_mpdu_encrypt_type enctype; + struct ethhdr *eth; + size_t hdr_len; + void *rfc1042; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + int l3_pad_bytes; + struct htt_rx_desc *rxd; + int bytes_aligned = ar->hw_params.decap_align_bytes; + + /* Delivered decapped frame: + * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc + * [payload] + */ + + rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype); + if (WARN_ON_ONCE(!rfc1042)) + return; - /* This shouldn't happen. If it does than it may be a FW bug. 
*/ - if (skb->next) { - ath10k_warn("received chained non A-MSDU frame\n"); - ath10k_htt_rx_free_msdu_chain(skb->next); - skb->next = NULL; + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); + + l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); + skb_put(msdu, l3_pad_bytes); + skb_pull(msdu, l3_pad_bytes); + + /* pull decapped header and copy SA & DA */ + eth = (struct ethhdr *)msdu->data; + ether_addr_copy(da, eth->h_dest); + ether_addr_copy(sa, eth->h_source); + skb_pull(msdu, sizeof(struct ethhdr)); + + /* push rfc1042/llc/snap */ + memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042, + sizeof(struct rfc1042_hdr)); + + /* push original 802.11 header */ + hdr = (struct ieee80211_hdr *)first_hdr; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + if (!(status->flag & RX_FLAG_IV_STRIPPED)) { + memcpy(skb_push(msdu, + ath10k_htt_rx_crypto_param_len(ar, enctype)), + (void *)hdr + round_up(hdr_len, bytes_aligned), + ath10k_htt_rx_crypto_param_len(ar, enctype)); } - rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); - hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN; + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); - switch (fmt) { + /* original 802.11 header has a different DA and in + * case of 4addr it may also have different SA + */ + hdr = (struct ieee80211_hdr *)msdu->data; + ether_addr_copy(ieee80211_get_DA(hdr), da); + ether_addr_copy(ieee80211_get_SA(hdr), sa); +} + +static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + const u8 first_hdr[64], + enum htt_rx_mpdu_encrypt_type enctype) +{ + struct ath10k_hw_params *hw = &ar->hw_params; + struct ieee80211_hdr *hdr; + size_t hdr_len; + int l3_pad_bytes; + struct htt_rx_desc *rxd; + int bytes_aligned = ar->hw_params.decap_align_bytes; + + /* Delivered decapped frame: + * [amsdu header] <-- replaced with 802.11 hdr + * [rfc1042/llc] + * [payload] + */ + + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); + + l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); + + skb_put(msdu, l3_pad_bytes); + skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes); + + hdr = (struct ieee80211_hdr *)first_hdr; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + if (!(status->flag & RX_FLAG_IV_STRIPPED)) { + memcpy(skb_push(msdu, + ath10k_htt_rx_crypto_param_len(ar, enctype)), + (void *)hdr + round_up(hdr_len, bytes_aligned), + ath10k_htt_rx_crypto_param_len(ar, enctype)); + } + + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); +} + +static void ath10k_htt_rx_h_undecap(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + u8 first_hdr[64], + enum htt_rx_mpdu_encrypt_type enctype, + bool is_decrypted) +{ + struct ath10k_hw_params *hw = &ar->hw_params; + struct htt_rx_desc *rxd; + struct rx_msdu_start_common *rxd_msdu_start_common; + enum rx_msdu_decap_format decap; + + /* First msdu's decapped header: + * [802.11 header] <-- padded to 4 bytes long + * [crypto param] <-- padded to 4 bytes long + * [amsdu header] <-- only if A-MSDU + * [rfc1042/llc] + * + * Other (2nd, 3rd, ..) 
msdu's decapped header: + * [amsdu header] <-- only if A-MSDU + * [rfc1042/llc] + */ + + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); + + rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); + decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1), + RX_MSDU_START_INFO1_DECAP_FORMAT); + + switch (decap) { case RX_MSDU_DECAP_RAW: - /* remove trailing FCS */ - skb_trim(skb, skb->len - 4); + ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, + is_decrypted, first_hdr); break; case RX_MSDU_DECAP_NATIVE_WIFI: - /* nothing to do here */ + ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr, + enctype); break; case RX_MSDU_DECAP_ETHERNET2_DIX: - /* macaddr[6] + macaddr[6] + ethertype[2] */ - skb_pull(skb, 6 + 6 + 2); + ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); break; case RX_MSDU_DECAP_8023_SNAP_LLC: - /* macaddr[6] + macaddr[6] + len[2] */ - /* we don't need this for non-A-MSDU */ - skb_pull(skb, 6 + 6 + 2); + ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr, + enctype); break; } +} + +static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb) +{ + struct htt_rx_desc *rxd; + struct rx_attention *rxd_attention; + struct rx_msdu_start_common *rxd_msdu_start_common; + u32 flags, info; + bool is_ip4, is_ip6; + bool is_tcp, is_udp; + bool ip_csum_ok, tcpudp_csum_ok; + + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)skb->data - hw->rx_desc_ops->rx_desc_size); + + rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); + rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); + flags = __le32_to_cpu(rxd_attention->flags); + info = __le32_to_cpu(rxd_msdu_start_common->info1); + + is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); + is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); + is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO); + is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO); + ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL); + tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL); + + if (!is_ip4 && !is_ip6) + return CHECKSUM_NONE; + if (!is_tcp && !is_udp) + return CHECKSUM_NONE; + if (!ip_csum_ok) + return CHECKSUM_NONE; + if (!tcpudp_csum_ok) + return CHECKSUM_NONE; + + return CHECKSUM_UNNECESSARY; +} + +static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw, + struct sk_buff *msdu) +{ + msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu); +} + +static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb, + enum htt_rx_mpdu_encrypt_type enctype) +{ + struct ieee80211_hdr *hdr; + u64 pn = 0; + u8 *ehdr; + + hdr = (struct ieee80211_hdr *)skb->data; + ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control); + + if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) { + pn = ehdr[0]; + pn |= (u64)ehdr[1] << 8; + pn |= (u64)ehdr[4] << 16; + pn |= (u64)ehdr[5] << 24; + pn |= (u64)ehdr[6] << 32; + pn |= (u64)ehdr[7] << 40; + } + return pn; +} - if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) { - void *llc; - int llclen; +static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; - llclen = 8; - llc = hdr; - llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4); - llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4); + hdr = (struct ieee80211_hdr *)skb->data; + return !is_multicast_ether_addr(hdr->addr1); +} - skb_push(skb, llclen); - memcpy(skb->data, llc, llclen); +static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar, + struct sk_buff 
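/* ---------------------------------------------------------------------
 * [Editor's sketch -- illustrative only, not part of the patch.]
 * ath10k_htt_rx_h_get_pn() above decodes the CCMP packet number: the
 * 48-bit PN is spread over bytes 0, 1, 4, 5, 6 and 7 of the 8-byte CCMP
 * header (bytes 2-3 hold the reserved and ExtIV/key-id fields), least
 * significant byte first:
 * --------------------------------------------------------------------- */
static u64 example_ccmp_pn48(const u8 ccmp_hdr[8])
{
	return (u64)ccmp_hdr[0] |
	       (u64)ccmp_hdr[1] << 8 |
	       (u64)ccmp_hdr[4] << 16 |
	       (u64)ccmp_hdr[5] << 24 |
	       (u64)ccmp_hdr[6] << 32 |
	       (u64)ccmp_hdr[7] << 40;
}
/* --------------------------------------------------------------------- */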
*skb, + u16 peer_id, + enum htt_rx_mpdu_encrypt_type enctype) +{ + struct ath10k_peer *peer; + union htt_rx_pn_t *last_pn, new_pn = {}; + struct ieee80211_hdr *hdr; + u8 tid, frag_number; + u32 seq; + + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n"); + return false; } - if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) { - int len = ieee80211_hdrlen(hdr->frame_control); - skb_push(skb, len); - memcpy(skb->data, hdr, len); + hdr = (struct ieee80211_hdr *)skb->data; + if (ieee80211_is_data_qos(hdr->frame_control)) + tid = ieee80211_get_tid(hdr); + else + tid = ATH10K_TXRX_NON_QOS_TID; + + last_pn = &peer->frag_tids_last_pn[tid]; + new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype); + frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; + seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); + + if (frag_number == 0) { + last_pn->pn48 = new_pn.pn48; + peer->frag_tids_seq[tid] = seq; + } else { + if (seq != peer->frag_tids_seq[tid]) + return false; + + if (new_pn.pn48 != last_pn->pn48 + 1) + return false; + + last_pn->pn48 = new_pn.pn48; } - info->skb = skb; - info->encrypt_type = enctype; - return 0; + return true; } -static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb) +static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *status, + bool fill_crypt_header, + u8 *rx_hdr, + enum ath10k_pkt_rx_err *err, + u16 peer_id, + bool frag) { + struct sk_buff *first; + struct sk_buff *last; + struct sk_buff *msdu, *temp; + struct ath10k_hw_params *hw = &ar->hw_params; struct htt_rx_desc *rxd; - u32 flags; + struct rx_attention *rxd_attention; + struct rx_mpdu_start *rxd_mpdu_start; - rxd = (void *)skb->data - sizeof(*rxd); - flags = __le32_to_cpu(rxd->attention.flags); + struct ieee80211_hdr *hdr; + enum htt_rx_mpdu_encrypt_type enctype; + u8 first_hdr[64]; + u8 *qos; + bool has_fcs_err; + bool has_crypto_err; + bool has_tkip_err; + bool has_peer_idx_invalid; + bool is_decrypted; + bool is_mgmt; + u32 attention; + bool frag_pn_check = true, multicast_check = true; + + if (skb_queue_empty(amsdu)) + return; - if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR) - return true; + first = skb_peek(amsdu); + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)first->data - hw->rx_desc_ops->rx_desc_size); - return false; + rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); + rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); + + is_mgmt = !!(rxd_attention->flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); + + enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0), + RX_MPDU_START_INFO0_ENCRYPT_TYPE); + + /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11 + * decapped header. It'll be used for undecapping of each MSDU. + */ + hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); + memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN); + + if (rx_hdr) + memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN); + + /* Each A-MSDU subframe will use the original header as the base and be + * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. + */ + hdr = (void *)first_hdr; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + qos = ieee80211_get_qos_ctl(hdr); + qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; + } + + /* Some attention flags are valid only in the last MSDU. 
*/ + last = skb_peek_tail(amsdu); + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)last->data - hw->rx_desc_ops->rx_desc_size); + + rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); + attention = __le32_to_cpu(rxd_attention->flags); + + has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR); + has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); + has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); + has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID); + + /* Note: If hardware captures an encrypted frame that it can't decrypt, + * e.g. due to fcs error, missing peer or invalid key data it will + * report the frame as raw. + */ + is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE && + !has_fcs_err && + !has_crypto_err && + !has_peer_idx_invalid); + + /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ + status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | + RX_FLAG_MMIC_ERROR | + RX_FLAG_DECRYPTED | + RX_FLAG_IV_STRIPPED | + RX_FLAG_ONLY_MONITOR | + RX_FLAG_MMIC_STRIPPED); + + if (has_fcs_err) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (has_tkip_err) + status->flag |= RX_FLAG_MMIC_ERROR; + + if (err) { + if (has_fcs_err) + *err = ATH10K_PKT_RX_ERR_FCS; + else if (has_tkip_err) + *err = ATH10K_PKT_RX_ERR_TKIP; + else if (has_crypto_err) + *err = ATH10K_PKT_RX_ERR_CRYPT; + else if (has_peer_idx_invalid) + *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL; + } + + /* Firmware reports all necessary management frames via WMI already. + * They are not reported to monitor interfaces at all so pass the ones + * coming via HTT to monitor interfaces instead. This simplifies + * matters a lot. + */ + if (is_mgmt) + status->flag |= RX_FLAG_ONLY_MONITOR; + + if (is_decrypted) { + status->flag |= RX_FLAG_DECRYPTED; + + if (likely(!is_mgmt)) + status->flag |= RX_FLAG_MMIC_STRIPPED; + + if (fill_crypt_header) + status->flag |= RX_FLAG_MIC_STRIPPED | + RX_FLAG_ICV_STRIPPED; + else + status->flag |= RX_FLAG_IV_STRIPPED; + } + + skb_queue_walk(amsdu, msdu) { + if (frag && !fill_crypt_header && is_decrypted && + enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) + frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar, + msdu, + peer_id, + enctype); + + if (frag) + multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar, + msdu); + + if (!frag_pn_check || !multicast_check) { + /* Discard the fragment with invalid PN or multicast DA + */ + temp = msdu->prev; + __skb_unlink(msdu, amsdu); + dev_kfree_skb_any(msdu); + msdu = temp; + frag_pn_check = true; + multicast_check = true; + continue; + } + + ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu); + + if (frag && !fill_crypt_header && + enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) + status->flag &= ~RX_FLAG_MMIC_STRIPPED; + + ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, + is_decrypted); + + /* Undecapping involves copying the original 802.11 header back + * to sk_buff. If frame is protected and hardware has decrypted + * it then remove the protected bit. 
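+ *
+ * [Editor's note -- condensed sketch, not part of the patch.] The trust
+ * decision made above from the last MSDU's attention flags boils down to:
+ *
+ *	is_decrypted = enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
+ *		       !(attention & (RX_ATTENTION_FLAGS_FCS_ERR |
+ *				      RX_ATTENTION_FLAGS_DECRYPT_ERR |
+ *				      RX_ATTENTION_FLAGS_PEER_IDX_INVALID));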
+ */ + if (!is_decrypted) + continue; + if (is_mgmt) + continue; + + if (fill_crypt_header) + continue; + + hdr = (void *)msdu->data; + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); + + if (frag && !fill_crypt_header && + enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) + status->flag &= ~RX_FLAG_IV_STRIPPED & + ~RX_FLAG_MMIC_STRIPPED; + } +} + +static void ath10k_htt_rx_h_enqueue(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *status) +{ + struct sk_buff *msdu; + struct sk_buff *first_subframe; + + first_subframe = skb_peek(amsdu); + + while ((msdu = __skb_dequeue(amsdu))) { + /* Setup per-MSDU flags */ + if (skb_queue_empty(amsdu)) + status->flag &= ~RX_FLAG_AMSDU_MORE; + else + status->flag |= RX_FLAG_AMSDU_MORE; + + if (msdu == first_subframe) { + first_subframe = NULL; + status->flag &= ~RX_FLAG_ALLOW_SAME_PN; + } else { + status->flag |= RX_FLAG_ALLOW_SAME_PN; + } + + ath10k_htt_rx_h_queue_msdu(ar, status, msdu); + } } -static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb) +static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, + unsigned long *unchain_cnt) { + struct sk_buff *skb, *first; + int space; + int total_len = 0; + int amsdu_len = skb_queue_len(amsdu); + + /* TODO: Might could optimize this by using + * skb_try_coalesce or similar method to + * decrease copying, or maybe get mac80211 to + * provide a way to just receive a list of + * skb? + */ + + first = __skb_dequeue(amsdu); + + /* Allocate total length all at once. */ + skb_queue_walk(amsdu, skb) + total_len += skb->len; + + space = total_len - skb_tailroom(first); + if ((space > 0) && + (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { + /* TODO: bump some rx-oom error stat */ + /* put it back together so we can free the + * whole list at once. + */ + __skb_queue_head(amsdu, first); + return -1; + } + + /* Walk list again, copying contents into + * msdu_head + */ + while ((skb = __skb_dequeue(amsdu))) { + skb_copy_from_linear_data(skb, skb_put(first, skb->len), + skb->len); + dev_kfree_skb_any(skb); + } + + __skb_queue_head(amsdu, first); + + *unchain_cnt += amsdu_len - 1; + + return 0; +} + +static void ath10k_htt_rx_h_unchain(struct ath10k *ar, + struct sk_buff_head *amsdu, + unsigned long *drop_cnt, + unsigned long *unchain_cnt) +{ + struct sk_buff *first; + struct ath10k_hw_params *hw = &ar->hw_params; + struct htt_rx_desc *rxd; + struct rx_msdu_start_common *rxd_msdu_start_common; + struct rx_frag_info_common *rxd_frag_info; + enum rx_msdu_decap_format decap; + + first = skb_peek(amsdu); + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)first->data - hw->rx_desc_ops->rx_desc_size); + + rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); + rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd); + decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1), + RX_MSDU_START_INFO1_DECAP_FORMAT); + + /* FIXME: Current unchaining logic can only handle simple case of raw + * msdu chaining. If decapping is other than raw the chaining may be + * more complex and this isn't handled by the current code. Don't even + * try re-constructing such frames - it'll be pretty much garbage. 
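+ *
+ * [Editor's note -- sketch, not part of the patch.] For the raw case that
+ * is supported, ath10k_unchain_msdu() above linearizes the chain in two
+ * steps:
+ *
+ *	space = total_len - skb_tailroom(first);
+ *	pskb_expand_head(first, 0, space, GFP_ATOMIC);   grow the head, then
+ *	while ((skb = __skb_dequeue(amsdu)))             copy and free each
+ *		skb_copy_from_linear_data(skb,           continuation buffer
+ *			skb_put(first, skb->len), skb->len);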
+ */ + if (decap != RX_MSDU_DECAP_RAW || + skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) { + *drop_cnt += skb_queue_len(amsdu); + __skb_queue_purge(amsdu); + return; + } + + ath10k_unchain_msdu(amsdu, unchain_cnt); +} + +static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar, + struct sk_buff_head *amsdu) +{ + u8 *subframe_hdr; + struct sk_buff *first; + bool is_first, is_last; + struct ath10k_hw_params *hw = &ar->hw_params; struct htt_rx_desc *rxd; - u32 flags; + struct rx_msdu_end_common *rxd_msdu_end_common; + struct rx_mpdu_start *rxd_mpdu_start; + struct ieee80211_hdr *hdr; + size_t hdr_len, crypto_len; + enum htt_rx_mpdu_encrypt_type enctype; + int bytes_aligned = ar->hw_params.decap_align_bytes; + + first = skb_peek(amsdu); + + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)first->data - hw->rx_desc_ops->rx_desc_size); - rxd = (void *)skb->data - sizeof(*rxd); - flags = __le32_to_cpu(rxd->attention.flags); + rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); + rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); + hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); - if (flags & RX_ATTENTION_FLAGS_FCS_ERR) + is_first = !!(rxd_msdu_end_common->info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); + is_last = !!(rxd_msdu_end_common->info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); + + /* Return in case of non-aggregated msdu */ + if (is_first && is_last) return true; - return false; + /* First msdu flag is not set for the first msdu of the list */ + if (!is_first) + return false; + + enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0), + RX_MPDU_START_INFO0_ENCRYPT_TYPE); + + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); + + subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) + + crypto_len; + + /* Validate if the amsdu has a proper first subframe. + * There are chances a single msdu can be received as amsdu when + * the unauthenticated amsdu flag of a QoS header + * gets flipped in non-SPP AMSDU's, in such cases the first + * subframe has llc/snap header in place of a valid da. 
+ * return false if the da matches rfc1042 pattern + */ + if (ether_addr_equal(subframe_hdr, rfc1042_header)) + return false; + + return true; +} + +static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *rx_status) +{ + if (!rx_status->freq) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); + return false; + } + + if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); + return false; + } + + if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n"); + return false; + } + + return true; +} + +static void ath10k_htt_rx_h_filter(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *rx_status, + unsigned long *drop_cnt) +{ + if (skb_queue_empty(amsdu)) + return; + + if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) + return; + + if (drop_cnt) + *drop_cnt += skb_queue_len(amsdu); + + __skb_queue_purge(amsdu); +} + +static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + struct ieee80211_rx_status *rx_status = &htt->rx_status; + struct sk_buff_head amsdu; + int ret; + unsigned long drop_cnt = 0; + unsigned long unchain_cnt = 0; + unsigned long drop_cnt_filter = 0; + unsigned long msdus_to_queue, num_msdus; + enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; + u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; + + __skb_queue_head_init(&amsdu); + + spin_lock_bh(&htt->rx_ring.lock); + if (htt->rx_confused) { + spin_unlock_bh(&htt->rx_ring.lock); + return -EIO; + } + ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); + spin_unlock_bh(&htt->rx_ring.lock); + + if (ret < 0) { + ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); + __skb_queue_purge(&amsdu); + /* FIXME: It's probably a good idea to reboot the + * device instead of leaving it inoperable. 
+ */ + htt->rx_confused = true; + return ret; + } + + num_msdus = skb_queue_len(&amsdu); + + ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); + + /* only for ret = 1 indicates chained msdus */ + if (ret > 0) + ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); + + ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); + ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0, + false); + msdus_to_queue = skb_queue_len(&amsdu); + ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); + + ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err, + unchain_cnt, drop_cnt, drop_cnt_filter, + msdus_to_queue); + + return 0; +} + +static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc, + union htt_rx_pn_t *pn, + int pn_len_bits) +{ + switch (pn_len_bits) { + case 48: + pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) + + ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32); + break; + case 24: + pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0); + break; + } } -static void ath10k_htt_rx_handler(struct ath10k_htt *htt, - struct htt_rx_indication *rx) +static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn, + union htt_rx_pn_t *old_pn) { - struct htt_rx_info info; + return ((new_pn->pn48 & 0xffffffffffffULL) <= + (old_pn->pn48 & 0xffffffffffffULL)); +} + +static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar, + struct ath10k_peer *peer, + struct htt_rx_indication_hl *rx) +{ + bool last_pn_valid, pn_invalid = false; + enum htt_txrx_sec_cast_type sec_index; + enum htt_security_types sec_type; + union htt_rx_pn_t new_pn = {}; + struct htt_hl_rx_desc *rx_desc; + union htt_rx_pn_t *last_pn; + u32 rx_desc_info, tid; + int num_mpdu_ranges; + + lockdep_assert_held(&ar->data_lock); + + if (!peer) + return false; + + if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU)) + return false; + + num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), + HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); + + rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; + rx_desc_info = __le32_to_cpu(rx_desc->info); + + if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) + return false; + + tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); + last_pn_valid = peer->tids_last_pn_valid[tid]; + last_pn = &peer->tids_last_pn[tid]; + + if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) + sec_index = HTT_TXRX_SEC_MCAST; + else + sec_index = HTT_TXRX_SEC_UCAST; + + sec_type = peer->rx_pn[sec_index].sec_type; + ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); + + if (sec_type != HTT_SECURITY_AES_CCMP && + sec_type != HTT_SECURITY_TKIP && + sec_type != HTT_SECURITY_TKIP_NOMIC) + return false; + + if (last_pn_valid) + pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn); + else + peer->tids_last_pn_valid[tid] = true; + + if (!pn_invalid) + last_pn->pn48 = new_pn.pn48; + + return pn_invalid; +} + +static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, + struct htt_rx_indication_hl *rx, + struct sk_buff *skb, + enum htt_rx_pn_check_type check_pn_type, + enum htt_rx_tkip_demic_type tkip_mic_type) +{ + struct ath10k *ar = htt->ar; + struct ath10k_peer *peer; struct htt_rx_indication_mpdu_range *mpdu_ranges; + struct fw_rx_desc_hl *fw_desc; + enum htt_txrx_sec_cast_type sec_index; + enum htt_security_types sec_type; + union htt_rx_pn_t new_pn = {}; + struct htt_hl_rx_desc *rx_desc; struct ieee80211_hdr *hdr; + struct ieee80211_rx_status *rx_status; + u16 peer_id; + u8 rx_desc_len; int num_mpdu_ranges; - int fw_desc_len; - 
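/* ---------------------------------------------------------------------
 * [Editor's sketch -- illustrative only, not part of the patch.]
 * ath10k_htt_rx_pn_cmp48() above implements the replay test: both packet
 * numbers are masked down to 48 bits, and a new PN that is not strictly
 * greater than the last accepted one marks a replay:
 * --------------------------------------------------------------------- */
static bool example_pn48_is_replay(u64 new_pn, u64 old_pn)
{
	const u64 pn_mask = 0xffffffffffffULL;	/* low 48 bits */

	return (new_pn & pn_mask) <= (old_pn & pn_mask);
}
/* --------------------------------------------------------------------- */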
u8 *fw_desc;
- int i, j;
- int ret;
+ size_t tot_hdr_len;
+ struct ieee80211_channel *ch;
+ bool pn_invalid, qos, first_msdu;
+ u32 tid, rx_desc_info;
- memset(&info, 0, sizeof(info));
+ peer_id = __le16_to_cpu(rx->hdr.peer_id);
+ tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
- fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
- fw_desc = (u8 *)&rx->fw_desc;
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ if (!peer && peer_id != HTT_INVALID_PEERID)
+ ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
+
+ if (!peer)
+ return true;
 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
- mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+ mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
+ fw_desc = &rx->fw_desc;
+ rx_desc_len = fw_desc->len;
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
- rx, sizeof(*rx) +
- (sizeof(struct htt_rx_indication_mpdu_range) *
- num_mpdu_ranges));
-
- for (i = 0; i < num_mpdu_ranges; i++) {
- info.status = mpdu_ranges[i].mpdu_range_status;
-
- for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
- struct sk_buff *msdu_head, *msdu_tail;
- enum htt_rx_mpdu_status status;
- int msdu_chaining;
-
- msdu_head = NULL;
- msdu_tail = NULL;
- msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
- &fw_desc,
- &fw_desc_len,
- &msdu_head,
- &msdu_tail);
-
- if (!msdu_head) {
- ath10k_warn("htt rx no data!\n");
- continue;
- }
+ if (fw_desc->u.bits.discard) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
+ goto err;
+ }
- if (msdu_head->len == 0) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx dropping due to zero-len\n");
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
+ /* I have not yet seen any case where num_mpdu_ranges > 1.
+ * qcacld does not seem to handle that case either, so we introduce the
+ * same limitation here as well.
+ */ + if (num_mpdu_ranges > 1) + ath10k_warn(ar, + "Unsupported number of MPDU ranges: %d, ignoring all but the first\n", + num_mpdu_ranges); + + if (mpdu_ranges->mpdu_range_status != + HTT_RX_IND_MPDU_STATUS_OK && + mpdu_ranges->mpdu_range_status != + HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n", + mpdu_ranges->mpdu_range_status); + goto err; + } - if (ath10k_htt_rx_has_decrypt_err(msdu_head)) { - ath10k_htt_rx_free_msdu_chain(msdu_head); - continue; - } + rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; + rx_desc_info = __le32_to_cpu(rx_desc->info); - status = info.status; + if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) + sec_index = HTT_TXRX_SEC_MCAST; + else + sec_index = HTT_TXRX_SEC_UCAST; - /* Skip mgmt frames while we handle this in WMI */ - if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) { - ath10k_htt_rx_free_msdu_chain(msdu_head); - continue; - } + sec_type = peer->rx_pn[sec_index].sec_type; + first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU; - if (status != HTT_RX_IND_MPDU_STATUS_OK && - status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && - !htt->ar->monitor_enabled) { - ath10k_dbg(ATH10K_DBG_HTT, - "htt rx ignoring frame w/ status %d\n", - status); - ath10k_htt_rx_free_msdu_chain(msdu_head); - continue; + ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); + + if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) { + spin_lock_bh(&ar->data_lock); + pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx); + spin_unlock_bh(&ar->data_lock); + + if (pn_invalid) + goto err; + } + + /* Strip off all headers before the MAC header before delivery to + * mac80211 + */ + tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) + + sizeof(rx->ppdu) + sizeof(rx->prefix) + + sizeof(rx->fw_desc) + + sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len; + + skb_pull(skb, tot_hdr_len); + + hdr = (struct ieee80211_hdr *)skb->data; + qos = ieee80211_is_data_qos(hdr->frame_control); + + rx_status = IEEE80211_SKB_RXCB(skb); + memset(rx_status, 0, sizeof(*rx_status)); + + if (rx->ppdu.combined_rssi == 0) { + /* SDIO firmware does not provide signal */ + rx_status->signal = 0; + rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; + } else { + rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + + rx->ppdu.combined_rssi; + rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; + } + + spin_lock_bh(&ar->data_lock); + ch = ar->scan_channel; + if (!ch) + ch = ar->rx_channel; + if (!ch) + ch = ath10k_htt_rx_h_any_channel(ar); + if (!ch) + ch = ar->tgt_oper_chan; + spin_unlock_bh(&ar->data_lock); + + if (ch) { + rx_status->band = ch->band; + rx_status->freq = ch->center_freq; + } + if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU) + rx_status->flag &= ~RX_FLAG_AMSDU_MORE; + else + rx_status->flag |= RX_FLAG_AMSDU_MORE; + + /* Not entirely sure about this, but all frames from the chipset has + * the protected flag set even though they have already been decrypted. + * Unmasking this flag is necessary in order for mac80211 not to drop + * the frame. + * TODO: Verify this is always the case or find out a way to check + * if there has been hw decryption. 
+ */
+ if (ieee80211_has_protected(hdr->frame_control)) {
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ rx_status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+
+ if (tid < IEEE80211_NUM_TIDS &&
+ first_msdu &&
+ check_pn_type == HTT_RX_PN_CHECK &&
+ (sec_type == HTT_SECURITY_AES_CCMP ||
+ sec_type == HTT_SECURITY_TKIP ||
+ sec_type == HTT_SECURITY_TKIP_NOMIC)) {
+ u8 offset, *ivp, i;
+ s8 keyidx = 0;
+ __le64 pn48 = cpu_to_le64(new_pn.pn48);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ offset = ieee80211_hdrlen(hdr->frame_control);
+ hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
+
+ memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
+ skb->data, offset);
+ skb_push(skb, IEEE80211_CCMP_HDR_LEN);
+ ivp = skb->data + offset;
+ memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
+ /* Ext IV */
+ ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] &&
+ peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ keyidx = peer->keys[i]->keyidx;
 }
- /* FIXME: we do not support chaining yet.
- * this needs investigation */
- if (msdu_chaining) {
- ath10k_warn("msdu_chaining is true\n");
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
+ /* Key ID */
+ ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
+
+ if (sec_type == HTT_SECURITY_AES_CCMP) {
+ rx_status->flag |= RX_FLAG_MIC_STRIPPED;
+ /* pn 0, pn 1 */
+ memcpy(skb->data + offset, &pn48, 2);
+ /* pn 2, pn 3, pn 4, pn 5 */
+ memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
+ } else {
+ rx_status->flag |= RX_FLAG_ICV_STRIPPED;
+ /* TSC 0 */
+ memcpy(skb->data + offset + 2, &pn48, 1);
+ /* TSC 1 */
+ memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
+ /* TSC 2, TSC 3, TSC 4, TSC 5 */
+ memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
 }
+ }
+ }
 
- info.skb = msdu_head;
- info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
- info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
- info.signal += rx->ppdu.combined_rssi;
+ if (tkip_mic_type == HTT_RX_TKIP_MIC)
+ rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
+ ~RX_FLAG_MMIC_STRIPPED;
 
- info.rate.info0 = rx->ppdu.info0;
- info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
- info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
+ if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
 
- hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
+ if (!qos && tid < IEEE80211_NUM_TIDS) {
+ u8 offset;
+ __le16 qos_ctrl = 0;
 
- if (ath10k_htt_rx_hdr_is_amsdu(hdr))
- ret = ath10k_htt_rx_amsdu(htt, &info);
- else
- ret = ath10k_htt_rx_msdu(htt, &info);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ offset = ieee80211_hdrlen(hdr->frame_control);
 
- if (ret && !info.fcs_err) {
- ath10k_warn("error processing msdus %d\n", ret);
- dev_kfree_skb_any(info.skb);
- continue;
+ hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+ memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
+ skb_push(skb, IEEE80211_QOS_CTL_LEN);
+ qos_ctrl = cpu_to_le16(tid);
+ memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
+ }
+
+ if (ar->napi.dev)
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
+ else
+ ieee80211_rx_ni(ar->hw, skb);
+
+ /* We have delivered the skb to the upper layers (mac80211) so we
+ * must not free it.
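+ *
+ * [Editor's note -- layout sketch, not part of the patch.] The CCMP
+ * branch above rebuilds the 8-byte CCMP header from the 48-bit PN; byte 2
+ * is reserved and byte 3 carries ExtIV | keyidx << 6:
+ *
+ *	hdr[0] = pn;        hdr[1] = pn >> 8;     pn 0, pn 1
+ *	hdr[4] = pn >> 16;  hdr[5] = pn >> 24;    pn 2, pn 3
+ *	hdr[6] = pn >> 32;  hdr[7] = pn >> 40;    pn 4, pn 5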
+ */ + return false; +err: + /* Tell the caller that it must free the skb since we have not + * consumed it + */ + return true; +} + +static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb, + u16 head_len, + u16 hdr_len) +{ + u8 *ivp, *orig_hdr; + + orig_hdr = skb->data; + ivp = orig_hdr + hdr_len + head_len; + + /* the ExtIV bit is always set to 1 for TKIP */ + if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) + return -EINVAL; + + memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len); + skb_pull(skb, IEEE80211_TKIP_IV_LEN); + skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN); + return 0; +} + +static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb, + u16 head_len, + u16 hdr_len) +{ + u8 *ivp, *orig_hdr; + + orig_hdr = skb->data; + ivp = orig_hdr + hdr_len + head_len; + + /* the ExtIV bit is always set to 1 for TKIP */ + if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) + return -EINVAL; + + memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len); + skb_pull(skb, IEEE80211_TKIP_IV_LEN); + skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); + return 0; +} + +static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb, + u16 head_len, + u16 hdr_len) +{ + u8 *ivp, *orig_hdr; + + orig_hdr = skb->data; + ivp = orig_hdr + hdr_len + head_len; + + /* the ExtIV bit is always set to 1 for CCMP */ + if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) + return -EINVAL; + + skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN); + memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len); + skb_pull(skb, IEEE80211_CCMP_HDR_LEN); + return 0; +} + +static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb, + u16 head_len, + u16 hdr_len) +{ + u8 *orig_hdr; + + orig_hdr = skb->data; + + memmove(orig_hdr + IEEE80211_WEP_IV_LEN, + orig_hdr, head_len + hdr_len); + skb_pull(skb, IEEE80211_WEP_IV_LEN); + skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); + return 0; +} + +static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt, + struct htt_rx_fragment_indication *rx, + struct sk_buff *skb) +{ + struct ath10k *ar = htt->ar; + enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC; + enum htt_txrx_sec_cast_type sec_index; + struct htt_rx_indication_hl *rx_hl; + enum htt_security_types sec_type; + u32 tid, frag, seq, rx_desc_info; + union htt_rx_pn_t new_pn = {}; + struct htt_hl_rx_desc *rx_desc; + u16 peer_id, sc, hdr_space; + union htt_rx_pn_t *last_pn; + struct ieee80211_hdr *hdr; + int ret, num_mpdu_ranges; + struct ath10k_peer *peer; + struct htt_resp *resp; + size_t tot_hdr_len; + + resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN); + skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN); + skb_trim(skb, skb->len - FCS_LEN); + + peer_id = __le16_to_cpu(rx->peer_id); + rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl); + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id); + goto err; + } + + num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1), + HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); + + tot_hdr_len = sizeof(struct htt_resp_hdr) + + sizeof(rx_hl->hdr) + + sizeof(rx_hl->ppdu) + + sizeof(rx_hl->prefix) + + sizeof(rx_hl->fw_desc) + + sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges; + + tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); + rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len); + rx_desc_info = 
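/* ---------------------------------------------------------------------
 * [Editor's sketch -- illustrative only, not part of the patch; iv_len
 * and icv_len are parameters here while the real helpers hard-code
 * per-cipher constants.] The four frag decap helpers above share one
 * in-place pattern: slide everything in front of the IV up over it, drop
 * the now-stale front bytes, then trim the ICV (or MIC) off the tail:
 * --------------------------------------------------------------------- */
static void example_frag_decap(struct sk_buff *skb, u16 head_len,
			       u16 hdr_len, u16 iv_len, u16 icv_len)
{
	u8 *orig_hdr = skb->data;

	memmove(orig_hdr + iv_len, orig_hdr, head_len + hdr_len);
	skb_pull(skb, iv_len);			/* drop the duplicated front */
	skb_trim(skb, skb->len - icv_len);	/* strip ICV/MIC from tail */
}
/* --------------------------------------------------------------------- */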
__le32_to_cpu(rx_desc->info); + + hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len); + + if (is_multicast_ether_addr(hdr->addr1)) { + /* Discard the fragment with multicast DA */ + goto err; + } + + if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) { + spin_unlock_bh(&ar->data_lock); + return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, + HTT_RX_NON_PN_CHECK, + HTT_RX_NON_TKIP_MIC); + } + + if (ieee80211_has_retry(hdr->frame_control)) + goto err; + + hdr_space = ieee80211_hdrlen(hdr->frame_control); + sc = __le16_to_cpu(hdr->seq_ctrl); + seq = IEEE80211_SEQ_TO_SN(sc); + frag = sc & IEEE80211_SCTL_FRAG; + + sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ? + HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST; + sec_type = peer->rx_pn[sec_index].sec_type; + ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); + + switch (sec_type) { + case HTT_SECURITY_TKIP: + tkip_mic = HTT_RX_TKIP_MIC; + ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb, + tot_hdr_len + + rx_hl->fw_desc.len, + hdr_space); + if (ret) + goto err; + break; + case HTT_SECURITY_TKIP_NOMIC: + ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb, + tot_hdr_len + + rx_hl->fw_desc.len, + hdr_space); + if (ret) + goto err; + break; + case HTT_SECURITY_AES_CCMP: + ret = ath10k_htt_rx_frag_ccmp_decap(skb, + tot_hdr_len + rx_hl->fw_desc.len, + hdr_space); + if (ret) + goto err; + break; + case HTT_SECURITY_WEP128: + case HTT_SECURITY_WEP104: + case HTT_SECURITY_WEP40: + ret = ath10k_htt_rx_frag_wep_decap(skb, + tot_hdr_len + rx_hl->fw_desc.len, + hdr_space); + if (ret) + goto err; + break; + default: + break; + } + + resp = (struct htt_resp *)(skb->data); + + if (sec_type != HTT_SECURITY_AES_CCMP && + sec_type != HTT_SECURITY_TKIP && + sec_type != HTT_SECURITY_TKIP_NOMIC) { + spin_unlock_bh(&ar->data_lock); + return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, + HTT_RX_NON_PN_CHECK, + HTT_RX_NON_TKIP_MIC); + } + + last_pn = &peer->frag_tids_last_pn[tid]; + + if (frag == 0) { + if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl)) + goto err; + + last_pn->pn48 = new_pn.pn48; + peer->frag_tids_seq[tid] = seq; + } else if (sec_type == HTT_SECURITY_AES_CCMP) { + if (seq != peer->frag_tids_seq[tid]) + goto err; + + if (new_pn.pn48 != last_pn->pn48 + 1) + goto err; + + last_pn->pn48 = new_pn.pn48; + last_pn = &peer->tids_last_pn[tid]; + last_pn->pn48 = new_pn.pn48; + } + + spin_unlock_bh(&ar->data_lock); + + return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, + HTT_RX_NON_PN_CHECK, tkip_mic); + +err: + spin_unlock_bh(&ar->data_lock); + + /* Tell the caller that it must free the skb since we have not + * consumed it + */ + return true; +} + +static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt, + struct htt_rx_indication *rx) +{ + struct ath10k *ar = htt->ar; + struct htt_rx_indication_mpdu_range *mpdu_ranges; + int num_mpdu_ranges; + int i, mpdu_count = 0; + u16 peer_id; + u8 tid; + + num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), + HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); + peer_id = __le16_to_cpu(rx->hdr.peer_id); + tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); + + mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); + + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", + rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges)); + + for (i = 0; i < num_mpdu_ranges; i++) + mpdu_count += mpdu_ranges[i].mpdu_count; + + atomic_add(mpdu_count, &htt->num_mpdus_ready); + + ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, 
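/* ---------------------------------------------------------------------
 * [Editor's sketch -- illustrative only, not part of the patch; in the
 * driver this state is kept per peer and per TID.] The fragment rule
 * enforced above: fragment 0 latches the PN and sequence number, and each
 * later CCMP fragment must carry the same sequence number and a PN
 * exactly one higher than its predecessor:
 * --------------------------------------------------------------------- */
static bool example_frag_pn_ok(u64 *last_pn, u16 *last_seq,
			       u64 new_pn, u16 seq, u8 frag_number)
{
	if (frag_number == 0) {
		*last_pn = new_pn;	/* first fragment sets the baseline */
		*last_seq = seq;
		return true;
	}

	if (seq != *last_seq || new_pn != *last_pn + 1)
		return false;		/* out-of-order or replayed fragment */

	*last_pn = new_pn;
	return true;
}
/* --------------------------------------------------------------------- */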
mpdu_ranges, + num_mpdu_ranges); +} + +static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, + struct sk_buff *skb) +{ + struct ath10k_htt *htt = &ar->htt; + struct htt_resp *resp = (struct htt_resp *)skb->data; + struct htt_tx_done tx_done = {}; + int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); + __le16 msdu_id, *msdus; + bool rssi_enabled = false; + u8 msdu_count = 0, num_airtime_records, tid; + int i, htt_pad = 0; + struct htt_data_tx_compl_ppdu_dur *ppdu_info; + struct ath10k_peer *peer; + u16 ppdu_info_offset = 0, peer_id; + u32 tx_duration; + + switch (status) { + case HTT_DATA_TX_STATUS_NO_ACK: + tx_done.status = HTT_TX_COMPL_STATE_NOACK; + break; + case HTT_DATA_TX_STATUS_OK: + tx_done.status = HTT_TX_COMPL_STATE_ACK; + break; + case HTT_DATA_TX_STATUS_DISCARD: + case HTT_DATA_TX_STATUS_POSTPONE: + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; + break; + default: + ath10k_warn(ar, "unhandled tx completion status %d\n", status); + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; + break; + } + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", + resp->data_tx_completion.num_msdus); + + msdu_count = resp->data_tx_completion.num_msdus; + msdus = resp->data_tx_completion.msdus; + rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp); + + if (rssi_enabled) + htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params, + resp); + + for (i = 0; i < msdu_count; i++) { + msdu_id = msdus[i]; + tx_done.msdu_id = __le16_to_cpu(msdu_id); + + if (rssi_enabled) { + /* Total no of MSDUs should be even, + * if odd MSDUs are sent firmware fills + * last msdu id with 0xffff + */ + if (msdu_count & 0x01) { + msdu_id = msdus[msdu_count + i + 1 + htt_pad]; + tx_done.ack_rssi = __le16_to_cpu(msdu_id); + } else { + msdu_id = msdus[msdu_count + i + htt_pad]; + tx_done.ack_rssi = __le16_to_cpu(msdu_id); } + } - if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data)) - ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n"); + /* kfifo_put: In practice firmware shouldn't fire off per-CE + * interrupt and main interrupt (MSI/-X range case) for the same + * HTC service so it should be safe to use kfifo_put w/o lock. + * + * From kfifo_put() documentation: + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) { + ath10k_txrx_tx_unref(htt, &tx_done); + } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) { + ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", + tx_done.msdu_id, tx_done.status); + ath10k_txrx_tx_unref(htt, &tx_done); + } + } + + if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT)) + return; + + ppdu_info_offset = (msdu_count & 0x01) ? 
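/* ---------------------------------------------------------------------
 * [Editor's sketch -- illustrative only, not part of the patch.] With ack
 * RSSI reporting enabled the firmware appends one RSSI word per MSDU
 * after the msdu_id array; an odd MSDU count is padded with a 0xffff
 * filler id first, and htt_pad covers hw-specific padding, so the index
 * math in the completion loop above condenses to:
 * --------------------------------------------------------------------- */
static u16 example_ack_rssi_index(u16 msdu_count, u16 i, int htt_pad)
{
	/* skip the 0xffff filler id when the MSDU count is odd */
	return msdu_count + i + htt_pad + ((msdu_count & 0x01) ? 1 : 0);
}
/* --------------------------------------------------------------------- */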
msdu_count + 1 : msdu_count; + + if (rssi_enabled) + ppdu_info_offset += ppdu_info_offset; + + if (resp->data_tx_completion.flags2 & + (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT)) + ppdu_info_offset += 2; + + ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset]; + num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK, + __le32_to_cpu(ppdu_info->info0)); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ", - info.skb->data, info.skb->len); - ath10k_process_rx(htt->ar, &info); + for (i = 0; i < num_airtime_records; i++) { + struct htt_data_tx_ppdu_dur *ppdu_dur; + u32 info0; + + ppdu_dur = &ppdu_info->ppdu_dur[i]; + info0 = __le32_to_cpu(ppdu_dur->info0); + + peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK, + info0); + rcu_read_lock(); + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer || !peer->sta) { + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); + continue; } + + tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) & + IEEE80211_QOS_CTL_TID_MASK; + tx_duration = __le32_to_cpu(ppdu_dur->tx_duration); + + ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0); + + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); } +} - ath10k_htt_rx_msdu_buff_replenish(htt); +static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) +{ + struct htt_rx_addba *ev = &resp->rx_addba; + struct ath10k_peer *peer; + struct ath10k_vif *arvif; + u16 info0, tid, peer_id; + + info0 = __le16_to_cpu(ev->info0); + tid = MS(info0, HTT_RX_BA_INFO0_TID); + peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); + + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt rx addba tid %u peer_id %u size %u\n", + tid, peer_id, ev->window_size); + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer) { + ath10k_warn(ar, "received addba event for invalid peer_id: %u\n", + peer_id); + spin_unlock_bh(&ar->data_lock); + return; + } + + arvif = ath10k_get_arvif(ar, peer->vdev_id); + if (!arvif) { + ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", + peer->vdev_id); + spin_unlock_bh(&ar->data_lock); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt rx start rx ba session sta %pM tid %u size %u\n", + peer->addr, tid, ev->window_size); + + ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); + spin_unlock_bh(&ar->data_lock); } -static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, - struct htt_rx_fragment_indication *frag) +static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) { - struct sk_buff *msdu_head, *msdu_tail; + struct htt_rx_delba *ev = &resp->rx_delba; + struct ath10k_peer *peer; + struct ath10k_vif *arvif; + u16 info0, tid, peer_id; + + info0 = __le16_to_cpu(ev->info0); + tid = MS(info0, HTT_RX_BA_INFO0_TID); + peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); + + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt rx delba tid %u peer_id %u\n", + tid, peer_id); + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer) { + ath10k_warn(ar, "received addba event for invalid peer_id: %u\n", + peer_id); + spin_unlock_bh(&ar->data_lock); + return; + } + + arvif = ath10k_get_arvif(ar, peer->vdev_id); + if (!arvif) { + ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", + peer->vdev_id); + spin_unlock_bh(&ar->data_lock); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt rx stop rx ba session sta %pM tid %u\n", + peer->addr, tid); + + 
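/* ---------------------------------------------------------------------
 * [Editor's sketch -- illustrative only, not part of the patch.] The
 * ADDBA and DELBA handlers above unpack the same info0 word; MS() is the
 * driver's mask-and-shift helper:
 * --------------------------------------------------------------------- */
static void example_parse_ba_info0(u16 info0, u16 *tid, u16 *peer_id)
{
	*tid = MS(info0, HTT_RX_BA_INFO0_TID);
	*peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
}
/* --------------------------------------------------------------------- */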
ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid); + spin_unlock_bh(&ar->data_lock); +} + +static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw, + struct sk_buff_head *list, + struct sk_buff_head *amsdu) +{ + struct sk_buff *msdu; struct htt_rx_desc *rxd; - enum rx_msdu_decap_format fmt; - struct htt_rx_info info = {}; - struct ieee80211_hdr *hdr; - int msdu_chaining; - bool tkip_mic_err; - bool decrypt_err; - u8 *fw_desc; - int fw_desc_len, hdrlen, paramlen; - int trim; + struct rx_msdu_end_common *rxd_msdu_end_common; - fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes); - fw_desc = (u8 *)frag->fw_msdu_rx_desc; + if (skb_queue_empty(list)) + return -ENOBUFS; - msdu_head = NULL; - msdu_tail = NULL; - msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, - &msdu_head, &msdu_tail); + if (WARN_ON(!skb_queue_empty(amsdu))) + return -EINVAL; + + while ((msdu = __skb_dequeue(list))) { + __skb_queue_tail(amsdu, msdu); + + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)msdu->data - + hw->rx_desc_ops->rx_desc_size); + + rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); + if (rxd_msdu_end_common->info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) + break; + } + + msdu = skb_peek_tail(amsdu); + rxd = HTT_RX_BUF_TO_RX_DESC(hw, + (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); + + rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); + if (!(rxd_msdu_end_common->info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { + skb_queue_splice_init(amsdu, list); + return -EAGAIN; + } - ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); + return 0; +} - if (!msdu_head) { - ath10k_warn("htt rx frag no data\n"); +static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (!ieee80211_has_protected(hdr->frame_control)) return; + + /* Offloaded frames are already decrypted but firmware insists they are + * protected in the 802.11 header. Strip the flag. Otherwise mac80211 + * will drop the frame. + */ + + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); + status->flag |= RX_FLAG_DECRYPTED | + RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED; +} + +static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, + struct sk_buff_head *list) +{ + struct ath10k_htt *htt = &ar->htt; + struct ieee80211_rx_status *status = &htt->rx_status; + struct htt_rx_offload_msdu *rx; + struct sk_buff *msdu; + size_t offset; + + while ((msdu = __skb_dequeue(list))) { + /* Offloaded frames don't have Rx descriptor. Instead they have + * a short meta information header. + */ + + rx = (void *)msdu->data; + + skb_put(msdu, sizeof(*rx)); + skb_pull(msdu, sizeof(*rx)); + + if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) { + ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); + dev_kfree_skb_any(msdu); + continue; + } + + skb_put(msdu, __le16_to_cpu(rx->msdu_len)); + + /* Offloaded rx header length isn't multiple of 2 nor 4 so the + * actual payload is unaligned. Align the frame. Otherwise + * mac80211 complains. This shouldn't reduce performance much + * because these offloaded frames are rare. + */ + offset = 4 - ((unsigned long)msdu->data & 3); + skb_put(msdu, offset); + memmove(msdu->data + offset, msdu->data, msdu->len); + skb_pull(msdu, offset); + + /* FIXME: The frame is NWifi. Re-construct QoS Control + * if possible later. 
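+ *
+ * [Editor's note -- sketch, not part of the patch.] The realignment a few
+ * lines up shifts the whole frame to the next 4-byte boundary in place:
+ *
+ *	offset = 4 - ((unsigned long)msdu->data & 3);
+ *	skb_put(msdu, offset);                   reserve room at the tail
+ *	memmove(msdu->data + offset, msdu->data, msdu->len);
+ *	skb_pull(msdu, offset);                  payload now 4-byte aligned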
+ */ + + memset(status, 0, sizeof(*status)); + status->flag |= RX_FLAG_NO_SIGNAL_VAL; + + ath10k_htt_rx_h_rx_offload_prot(status, msdu); + ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); + ath10k_htt_rx_h_queue_msdu(ar, status, msdu); + } +} + +static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_htt *htt = &ar->htt; + struct htt_resp *resp = (void *)skb->data; + struct ieee80211_rx_status *status = &htt->rx_status; + struct sk_buff_head list; + struct sk_buff_head amsdu; + u16 peer_id; + u16 msdu_count; + u8 vdev_id; + u8 tid; + bool offload; + bool frag; + int ret; + + lockdep_assert_held(&htt->rx_ring.lock); + + if (htt->rx_confused) + return -EIO; + + skb_pull(skb, sizeof(resp->hdr)); + skb_pull(skb, sizeof(resp->rx_in_ord_ind)); + + peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); + msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); + vdev_id = resp->rx_in_ord_ind.vdev_id; + tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); + offload = !!(resp->rx_in_ord_ind.info & + HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); + frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); + + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", + vdev_id, peer_id, tid, offload, frag, msdu_count); + + if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) { + ath10k_warn(ar, "dropping invalid in order rx indication\n"); + return -EINVAL; } - if (msdu_chaining || msdu_head != msdu_tail) { - ath10k_warn("aggregation with fragmentation?!\n"); - ath10k_htt_rx_free_msdu_chain(msdu_head); + /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later + * extracted and processed. + */ + __skb_queue_head_init(&list); + if (ar->hw_params.target_64bit) + ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, + &list); + else + ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, + &list); + + if (ret < 0) { + ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); + htt->rx_confused = true; + return -EIO; + } + + /* Offloaded frames are very different and need to be handled + * separately. + */ + if (offload) + ath10k_htt_rx_h_rx_offload(ar, &list); + + while (!skb_queue_empty(&list)) { + __skb_queue_head_init(&amsdu); + ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu); + switch (ret) { + case 0: + /* Note: The in-order indication may report interleaved + * frames from different PPDUs meaning reported rx rate + * to mac80211 isn't accurate/reliable. It's still + * better to report something than nothing though. This + * should still give an idea about rx rate to the user. + */ + ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); + ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); + ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, + NULL, peer_id, frag); + ath10k_htt_rx_h_enqueue(ar, &amsdu, status); + break; + case -EAGAIN: + fallthrough; + default: + /* Should not happen. 
*/ + ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); + htt->rx_confused = true; + __skb_queue_purge(&list); + return -EIO; + } + } + return ret; +} + +static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, + const __le32 *resp_ids, + int num_resp_ids) +{ + int i; + u32 resp_id; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n", + num_resp_ids); + + for (i = 0; i < num_resp_ids; i++) { + resp_id = le32_to_cpu(resp_ids[i]); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n", + resp_id); + + /* TODO: free resp_id */ + } +} + +static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) +{ + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_txq *txq; + struct htt_resp *resp = (struct htt_resp *)skb->data; + struct htt_tx_fetch_record *record; + size_t len; + size_t max_num_bytes; + size_t max_num_msdus; + size_t num_bytes; + size_t num_msdus; + const __le32 *resp_ids; + u16 num_records; + u16 num_resp_ids; + u16 peer_id; + u8 tid; + int ret; + int i; + bool may_tx; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); + + len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind); + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n"); + return; + } + + num_records = le16_to_cpu(resp->tx_fetch_ind.num_records); + num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids); + + len += sizeof(resp->tx_fetch_ind.records[0]) * num_records; + len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids; + + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n"); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n", + num_records, num_resp_ids, + le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); + + if (!ar->htt.tx_q_state.enabled) { + ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n"); + return; + } + + if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) { + ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n"); + return; + } + + rcu_read_lock(); + + for (i = 0; i < num_records; i++) { + record = &resp->tx_fetch_ind.records[i]; + peer_id = MS(le16_to_cpu(record->info), + HTT_TX_FETCH_RECORD_INFO_PEER_ID); + tid = MS(le16_to_cpu(record->info), + HTT_TX_FETCH_RECORD_INFO_TID); + max_num_msdus = le16_to_cpu(record->num_msdus); + max_num_bytes = le32_to_cpu(record->num_bytes); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n", + i, peer_id, tid, max_num_msdus, max_num_bytes); + + if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || + unlikely(tid >= ar->htt.tx_q_state.num_tids)) { + ath10k_warn(ar, "received out of range peer_id %u tid %u\n", + peer_id, tid); + continue; + } + + spin_lock_bh(&ar->data_lock); + txq = ath10k_mac_txq_lookup(ar, peer_id, tid); + spin_unlock_bh(&ar->data_lock); + + /* It is okay to release the lock and use txq because RCU read + * lock is held. 
+ */ + + if (unlikely(!txq)) { + ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n", + peer_id, tid); + continue; + } + + num_msdus = 0; + num_bytes = 0; + + ieee80211_txq_schedule_start(hw, txq->ac); + may_tx = ieee80211_txq_may_transmit(hw, txq); + while (num_msdus < max_num_msdus && + num_bytes < max_num_bytes) { + if (!may_tx) + break; + + ret = ath10k_mac_tx_push_txq(hw, txq); + if (ret < 0) + break; + + num_msdus++; + num_bytes += ret; + } + ieee80211_return_txq(hw, txq, false); + ieee80211_txq_schedule_end(hw, txq->ac); + + record->num_msdus = cpu_to_le16(num_msdus); + record->num_bytes = cpu_to_le32(num_bytes); + + ath10k_htt_tx_txq_recalc(hw, txq); + } + + rcu_read_unlock(); + + resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind); + ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids); + + ret = ath10k_htt_tx_fetch_resp(ar, + resp->tx_fetch_ind.token, + resp->tx_fetch_ind.fetch_seq_num, + resp->tx_fetch_ind.records, + num_records); + if (unlikely(ret)) { + ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n", + le32_to_cpu(resp->tx_fetch_ind.token), ret); + /* FIXME: request fw restart */ + } + + ath10k_htt_tx_txq_sync(ar); +} + +static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar, + struct sk_buff *skb) +{ + const struct htt_resp *resp = (void *)skb->data; + size_t len; + int num_resp_ids; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n"); + + len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm); + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n"); + return; + } + + num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids); + len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids; + + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n"); + return; + } + + ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, + resp->tx_fetch_confirm.resp_ids, + num_resp_ids); +} + +static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, + struct sk_buff *skb) +{ + const struct htt_resp *resp = (void *)skb->data; + const struct htt_tx_mode_switch_record *record; + struct ieee80211_txq *txq; + struct ath10k_txq *artxq; + size_t len; + size_t num_records; + enum htt_tx_mode_switch_mode mode; + bool enable; + u16 info0; + u16 info1; + u16 threshold; + u16 peer_id; + u8 tid; + int i; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n"); + + len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind); + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n"); return; } - /* FIXME: implement signal strength */ + info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0); + info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1); + + enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE); + num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS); + mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE); + threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); + + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n", + info0, info1, enable, num_records, mode, threshold); + + len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records; + + if (unlikely(skb->len < len)) { + ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n"); + return; + } + + switch (mode) { + case 
HTT_TX_MODE_SWITCH_PUSH: + case HTT_TX_MODE_SWITCH_PUSH_PULL: + break; + default: + ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n", + mode); + return; + } + + if (!enable) + return; + + ar->htt.tx_q_state.enabled = enable; + ar->htt.tx_q_state.mode = mode; + ar->htt.tx_q_state.num_push_allowed = threshold; + + rcu_read_lock(); + + for (i = 0; i < num_records; i++) { + record = &resp->tx_mode_switch_ind.records[i]; + info0 = le16_to_cpu(record->info0); + peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); + tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); + + if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || + unlikely(tid >= ar->htt.tx_q_state.num_tids)) { + ath10k_warn(ar, "received out of range peer_id %u tid %u\n", + peer_id, tid); + continue; + } + + spin_lock_bh(&ar->data_lock); + txq = ath10k_mac_txq_lookup(ar, peer_id, tid); + spin_unlock_bh(&ar->data_lock); + + /* It is okay to release the lock and use txq because RCU read + * lock is held. + */ + + if (unlikely(!txq)) { + ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n", + peer_id, tid); + continue; + } + + spin_lock_bh(&ar->htt.tx_lock); + artxq = (void *)txq->drv_priv; + artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); + spin_unlock_bh(&ar->htt.tx_lock); + } + + rcu_read_unlock(); + + ath10k_mac_tx_push_pending(ar); +} + +void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) +{ + bool release; + + release = ath10k_htt_t2h_msg_handler(ar, skb); + + /* Free the indication buffer */ + if (release) + dev_kfree_skb_any(skb); +} + +static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) +{ + static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, + 18, 24, 36, 48, 54}; + int i; + + for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) { + if (rate == legacy_rates[i]) + return i; + } + + ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate); + return -EINVAL; +} + +static void +ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, + struct ath10k_sta *arsta, + struct ath10k_per_peer_tx_stats *pstats, + s8 legacy_rate_idx) +{ + struct rate_info *txrate = &arsta->txrate; + struct ath10k_htt_tx_stats *tx_stats; + int idx, ht_idx, gi, mcs, bw, nss; + unsigned long flags; + + if (!arsta->tx_stats) + return; + + tx_stats = arsta->tx_stats; + flags = txrate->flags; + gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags); + mcs = ATH10K_HW_MCS_RATE(pstats->ratecode); + bw = txrate->bw; + nss = txrate->nss; + ht_idx = mcs + (nss - 1) * 8; + idx = mcs * 8 + 8 * 10 * (nss - 1); + idx += bw * 2 + gi; + +#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] + + if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) { + STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts; + STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts; + STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts; + } else if (txrate->flags & RATE_INFO_FLAGS_MCS) { + STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts; + STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts; + STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts; + } else { + mcs = legacy_rate_idx; + + STATS_OP_FMT(SUCC).legacy[0][mcs] += 
pstats->succ_bytes; + STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts; + STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts; + STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts; + } + + if (ATH10K_HW_AMPDU(pstats->flags)) { + tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags); + + if (txrate->flags & RATE_INFO_FLAGS_MCS) { + STATS_OP_FMT(AMPDU).ht[0][ht_idx] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).ht[1][ht_idx] += + pstats->succ_pkts + pstats->retry_pkts; + } else { + STATS_OP_FMT(AMPDU).vht[0][mcs] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).vht[1][mcs] += + pstats->succ_pkts + pstats->retry_pkts; + } + STATS_OP_FMT(AMPDU).bw[0][bw] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).nss[0][nss - 1] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).gi[0][gi] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).rate_table[0][idx] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).bw[1][bw] += + pstats->succ_pkts + pstats->retry_pkts; + STATS_OP_FMT(AMPDU).nss[1][nss - 1] += + pstats->succ_pkts + pstats->retry_pkts; + STATS_OP_FMT(AMPDU).gi[1][gi] += + pstats->succ_pkts + pstats->retry_pkts; + STATS_OP_FMT(AMPDU).rate_table[1][idx] += + pstats->succ_pkts + pstats->retry_pkts; + } else { + tx_stats->ack_fails += + ATH10K_HW_BA_FAIL(pstats->flags); + } + + STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes; + + STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts; + STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts; + STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts; + + STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; + + STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; + STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts; + STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; + + STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; + + STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; + STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts; + STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; + + if (txrate->flags >= RATE_INFO_FLAGS_MCS) { + STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts; + STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts; + STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts; + } + + tx_stats->tx_duration += pstats->duration; +} + +static void +ath10k_update_per_peer_tx_stats(struct ath10k *ar, + struct ieee80211_sta *sta, + struct ath10k_per_peer_tx_stats *peer_stats) +{ + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ieee80211_chanctx_conf *conf = NULL; + u8 rate = 0, sgi; + s8 rate_idx = 0; + bool skip_auto_rate; + struct rate_info txrate; + + lockdep_assert_held(&ar->data_lock); + + txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode); + txrate.bw = ATH10K_HW_BW(peer_stats->flags); 
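+ /* peer_stats->ratecode packs preamble, NSS and MCS into a single byte + * (cf. the ATH10K_HW_RATECODE() encoding); the ATH10K_HW_* helpers here + * unpack the individual fields, while GI and bandwidth come from the + * separate flags word. + */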
+ txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode); + txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode); + sgi = ATH10K_HW_GI(peer_stats->flags); + skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags); + + /* Firmware's rate control skips broadcast/management frames, + * if the host has configured fixed rates, and in some other special cases. + */ + if (skip_auto_rate) + return; + + if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) { + ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs); + return; + } + + if (txrate.flags == WMI_RATE_PREAMBLE_HT && + (txrate.mcs > 7 || txrate.nss < 1)) { + ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats", + txrate.mcs, txrate.nss); + return; + } + + memset(&arsta->txrate, 0, sizeof(arsta->txrate)); + memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status)); + if (txrate.flags == WMI_RATE_PREAMBLE_CCK || + txrate.flags == WMI_RATE_PREAMBLE_OFDM) { + rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); + /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */ + if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK) + rate = 5; + rate_idx = ath10k_get_legacy_rate_idx(ar, rate); + if (rate_idx < 0) + return; + arsta->txrate.legacy = rate; + } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { + arsta->txrate.flags = RATE_INFO_FLAGS_MCS; + arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1); + } else { + arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; + arsta->txrate.mcs = txrate.mcs; + } + + switch (txrate.flags) { + case WMI_RATE_PREAMBLE_OFDM: + if (arsta->arvif && arsta->arvif->vif) + conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf); + if (conf && conf->def.chan->band == NL80211_BAND_5GHZ) + arsta->tx_info.status.rates[0].idx = rate_idx - 4; + break; + case WMI_RATE_PREAMBLE_CCK: + arsta->tx_info.status.rates[0].idx = rate_idx; + if (sgi) + arsta->tx_info.status.rates[0].flags |= + (IEEE80211_TX_RC_USE_SHORT_PREAMBLE | + IEEE80211_TX_RC_SHORT_GI); + break; + case WMI_RATE_PREAMBLE_HT: + arsta->tx_info.status.rates[0].idx = + txrate.mcs + ((txrate.nss - 1) * 8); + if (sgi) + arsta->tx_info.status.rates[0].flags |= + IEEE80211_TX_RC_SHORT_GI; + arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS; + break; + case WMI_RATE_PREAMBLE_VHT: + ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], + txrate.mcs, txrate.nss); + if (sgi) + arsta->tx_info.status.rates[0].flags |= + IEEE80211_TX_RC_SHORT_GI; + arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS; + break; + } + + arsta->txrate.nss = txrate.nss; + arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); + arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate); + if (sgi) + arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + + switch (arsta->txrate.bw) { + case RATE_INFO_BW_40: + arsta->tx_info.status.rates[0].flags |= + IEEE80211_TX_RC_40_MHZ_WIDTH; + break; + case RATE_INFO_BW_80: + arsta->tx_info.status.rates[0].flags |= + IEEE80211_TX_RC_80_MHZ_WIDTH; + break; + case RATE_INFO_BW_160: + arsta->tx_info.status.rates[0].flags |= + IEEE80211_TX_RC_160_MHZ_WIDTH; + break; + } - hdr = (struct ieee80211_hdr *)msdu_head->data; - rxd = (void *)msdu_head->data - sizeof(*rxd); - tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) & - RX_ATTENTION_FLAGS_TKIP_MIC_ERR); - decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) & - RX_ATTENTION_FLAGS_DECRYPT_ERR); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); + if (peer_stats->succ_pkts) { + arsta->tx_info.flags = IEEE80211_TX_STAT_ACK; + 
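/* At least one MSDU in this PPDU was acked, so report the entry + * to mac80211 as acked with a single rate attempt. + */ + 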
arsta->tx_info.status.rates[0].count = 1; + ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info); + } - if (fmt != RX_MSDU_DECAP_RAW) { - ath10k_warn("we dont support non-raw fragmented rx yet\n"); - dev_kfree_skb_any(msdu_head); - goto end; + if (ar->htt.disable_tx_comp) { + arsta->tx_failed += peer_stats->failed_pkts; + ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n", + arsta->tx_failed); } - info.skb = msdu_head; - info.status = HTT_RX_IND_MPDU_STATUS_OK; - info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); + arsta->tx_retries += peer_stats->retry_pkts; + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries); - if (tkip_mic_err) { - ath10k_warn("tkip mic error\n"); - info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR; + if (ath10k_debug_is_extd_tx_stats_enabled(ar)) + ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats, + rate_idx); +} + +static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, + struct sk_buff *skb) +{ + struct htt_resp *resp = (struct htt_resp *)skb->data; + struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; + struct htt_per_peer_tx_stats_ind *tx_stats; + struct ieee80211_sta *sta; + struct ath10k_peer *peer; + int peer_id, i; + u8 ppdu_len, num_ppdu; + + num_ppdu = resp->peer_tx_stats.num_ppdu; + ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32); + + if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) { + ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len); + return; } - if (decrypt_err) { - ath10k_warn("decryption err in fragmented rx\n"); - dev_kfree_skb_any(info.skb); - goto end; + tx_stats = (struct htt_per_peer_tx_stats_ind *) + (resp->peer_tx_stats.payload); + peer_id = __le16_to_cpu(tx_stats->peer_id); + + rcu_read_lock(); + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer || !peer->sta) { + ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", + peer_id); + goto out; } - if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) { - hdrlen = ieee80211_hdrlen(hdr->frame_control); - paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type); + sta = peer->sta; + for (i = 0; i < num_ppdu; i++) { + tx_stats = (struct htt_per_peer_tx_stats_ind *) + (resp->peer_tx_stats.payload + i * ppdu_len); + + p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes); + p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes); + p_tx_stats->failed_bytes = + __le32_to_cpu(tx_stats->failed_bytes); + p_tx_stats->ratecode = tx_stats->ratecode; + p_tx_stats->flags = tx_stats->flags; + p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts); + p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts); + p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts); + p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration); + + ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); + } - /* It is more efficient to move the header than the payload */ - memmove((void *)info.skb->data + paramlen, - (void *)info.skb->data, - hdrlen); - skb_pull(info.skb, paramlen); - hdr = (struct ieee80211_hdr *)info.skb->data; +out: + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); +} + +static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) +{ + struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data; + struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; + struct ath10k_10_2_peer_tx_stats *tx_stats; + struct ieee80211_sta *sta; + struct ath10k_peer *peer; + u16 log_type = 
__le16_to_cpu(hdr->log_type); + u32 peer_id = 0, i; + + if (log_type != ATH_PKTLOG_TYPE_TX_STAT) + return; + + tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) + + ATH10K_10_2_TX_STATS_OFFSET); + + if (!tx_stats->tx_ppdu_cnt) + return; + + peer_id = tx_stats->peer_id; + + rcu_read_lock(); + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer || !peer->sta) { + ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", + peer_id); + goto out; } - /* remove trailing FCS */ - trim = 4; + sta = peer->sta; + for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) { + p_tx_stats->succ_bytes = + __le16_to_cpu(tx_stats->success_bytes[i]); + p_tx_stats->retry_bytes = + __le16_to_cpu(tx_stats->retry_bytes[i]); + p_tx_stats->failed_bytes = + __le16_to_cpu(tx_stats->failed_bytes[i]); + p_tx_stats->ratecode = tx_stats->ratecode[i]; + p_tx_stats->flags = tx_stats->flags[i]; + p_tx_stats->succ_pkts = tx_stats->success_pkts[i]; + p_tx_stats->retry_pkts = tx_stats->retry_pkts[i]; + p_tx_stats->failed_pkts = tx_stats->failed_pkts[i]; + + ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); + } + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); - /* remove crypto trailer */ - trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type); + return; - /* last fragment of TKIP frags has MIC */ - if (!ieee80211_has_morefrags(hdr->frame_control) && - info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) - trim += 8; +out: + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); +} - if (trim > info.skb->len) { - ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n"); - dev_kfree_skb_any(info.skb); - goto end; +static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type) +{ + switch (sec_type) { + case HTT_SECURITY_TKIP: + case HTT_SECURITY_TKIP_NOMIC: + case HTT_SECURITY_AES_CCMP: + return 48; + default: + return 0; } +} - skb_trim(info.skb, info.skb->len - trim); +static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar, + struct htt_security_indication *ev) +{ + enum htt_txrx_sec_cast_type sec_index; + enum htt_security_types sec_type; + struct ath10k_peer *peer; - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ", - info.skb->data, info.skb->len); - ath10k_process_rx(htt->ar, &info); + spin_lock_bh(&ar->data_lock); -end: - if (fw_desc_len > 0) { - ath10k_dbg(ATH10K_DBG_HTT, - "expecting more fragmented rx in one indication %d\n", - fw_desc_len); + peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id)); + if (!peer) { + ath10k_warn(ar, "failed to find peer id %d for security indication", + __le16_to_cpu(ev->peer_id)); + goto out; } + + sec_type = MS(ev->flags, HTT_SECURITY_TYPE); + + if (ev->flags & HTT_SECURITY_IS_UNICAST) + sec_index = HTT_TXRX_SEC_UCAST; + else + sec_index = HTT_TXRX_SEC_MCAST; + + peer->rx_pn[sec_index].sec_type = sec_type; + peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type); + + memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid)); + memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn)); + +out: + spin_unlock_bh(&ar->data_lock); } -void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) +bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { - struct ath10k_htt *htt = ar->htt; + struct ath10k_htt *htt = &ar->htt; struct htt_resp *resp = (struct htt_resp *)skb->data; + enum htt_t2h_msg_type type; /* confirm alignment */ if (!IS_ALIGNED((unsigned long)skb->data, 4)) - ath10k_warn("unaligned htt message, expect trouble\n"); + ath10k_warn(ar, 
"unaligned htt message, expect trouble\n"); - ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", resp->hdr.msg_type); - switch (resp->hdr.msg_type) { + + if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X", + resp->hdr.msg_type, ar->htt.t2h_msg_types_max); + return true; + } + type = ar->htt.t2h_msg_types[resp->hdr.msg_type]; + + switch (type) { case HTT_T2H_MSG_TYPE_VERSION_CONF: { htt->target_version_major = resp->ver_resp.major; htt->target_version_minor = resp->ver_resp.minor; complete(&htt->target_version_received); break; } - case HTT_T2H_MSG_TYPE_RX_IND: { - ath10k_htt_rx_handler(htt, &resp->rx_ind); + case HTT_T2H_MSG_TYPE_RX_IND: + if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) { + ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind); + } else { + skb_queue_tail(&htt->rx_indication_head, skb); + return false; + } break; - } case HTT_T2H_MSG_TYPE_PEER_MAP: { struct htt_peer_map_event ev = { .vdev_id = resp->peer_map.vdev_id, @@ -1074,65 +4125,57 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) } case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { struct htt_tx_done tx_done = {}; + struct ath10k_htt *htt = &ar->htt; + struct ath10k_htc *htc = &ar->htc; + struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid]; int status = __le32_to_cpu(resp->mgmt_tx_completion.status); + int info = __le32_to_cpu(resp->mgmt_tx_completion.info); - tx_done.msdu_id = - __le32_to_cpu(resp->mgmt_tx_completion.desc_id); + tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id); switch (status) { case HTT_MGMT_TX_STATUS_OK: + tx_done.status = HTT_TX_COMPL_STATE_ACK; + if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + ar->wmi.svc_map) && + (resp->mgmt_tx_completion.flags & + HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) { + tx_done.ack_rssi = + FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK, + info); + } break; case HTT_MGMT_TX_STATUS_RETRY: - tx_done.no_ack = true; + tx_done.status = HTT_TX_COMPL_STATE_NOACK; break; case HTT_MGMT_TX_STATUS_DROP: - tx_done.discard = true; + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; break; } - ath10k_txrx_tx_completed(htt, &tx_done); - break; - } - case HTT_T2H_MSG_TYPE_TX_COMPL_IND: { - struct htt_tx_done tx_done = {}; - int status = MS(resp->data_tx_completion.flags, - HTT_DATA_TX_STATUS); - __le16 msdu_id; - int i; - - switch (status) { - case HTT_DATA_TX_STATUS_NO_ACK: - tx_done.no_ack = true; - break; - case HTT_DATA_TX_STATUS_OK: - break; - case HTT_DATA_TX_STATUS_DISCARD: - case HTT_DATA_TX_STATUS_POSTPONE: - case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: - tx_done.discard = true; - break; - default: - ath10k_warn("unhandled tx completion status %d\n", - status); - tx_done.discard = true; - break; + if (htt->disable_tx_comp) { + spin_lock_bh(&htc->tx_lock); + ep->tx_credits++; + spin_unlock_bh(&htc->tx_lock); } - ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", - resp->data_tx_completion.num_msdus); - - for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { - msdu_id = resp->data_tx_completion.msdus[i]; - tx_done.msdu_id = __le16_to_cpu(msdu_id); - ath10k_txrx_tx_completed(htt, &tx_done); + status = ath10k_txrx_tx_unref(htt, &tx_done); + if (!status) { + spin_lock_bh(&htt->tx_lock); + ath10k_htt_tx_mgmt_dec_pending(htt); + spin_unlock_bh(&htt->tx_lock); } break; } + case HTT_T2H_MSG_TYPE_TX_COMPL_IND: + ath10k_htt_rx_tx_compl_ind(htt->ar, skb); + break; case HTT_T2H_MSG_TYPE_SEC_IND: { 
struct ath10k *ar = htt->ar; struct htt_security_indication *ev = &resp->security_indication; - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_htt_rx_sec_ind_handler(ar, ev); + ath10k_dbg(ar, ATH10K_DBG_HTT, "sec ind peer_id %d unicast %d type %d\n", __le16_to_cpu(ev->peer_id), !!(ev->flags & HTT_SECURITY_IS_UNICAST), @@ -1141,27 +4184,291 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); - ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind); - break; + atomic_inc(&htt->num_mpdus_ready); + + return ath10k_htt_rx_proc_rx_frag_ind(htt, + &resp->rx_frag_ind, + skb); } case HTT_T2H_MSG_TYPE_TEST: - /* FIX THIS */ break; - case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: case HTT_T2H_MSG_TYPE_STATS_CONF: + trace_ath10k_htt_stats(ar, skb->data, skb->len); + break; + case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: + /* Firmware can return tx frames if it's unable to fully + * process them and suspects host may be able to fix it. ath10k + * sends all tx frames as already inspected so this shouldn't + * happen unless fw has a bug. + */ + ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); + break; case HTT_T2H_MSG_TYPE_RX_ADDBA: + ath10k_htt_rx_addba(ar, resp); + break; case HTT_T2H_MSG_TYPE_RX_DELBA: - case HTT_T2H_MSG_TYPE_RX_FLUSH: + ath10k_htt_rx_delba(ar, resp); + break; + case HTT_T2H_MSG_TYPE_PKTLOG: { + trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, + skb->len - + offsetof(struct htt_resp, + pktlog_msg.payload)); + + if (ath10k_peer_stats_enabled(ar)) + ath10k_fetch_10_2_tx_stats(ar, + resp->pktlog_msg.payload); + break; + } + case HTT_T2H_MSG_TYPE_RX_FLUSH: { + /* Ignore this event because mac80211 takes care of Rx + * aggregation reordering. 
+ */ + break; + } + case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { + skb_queue_tail(&htt->rx_in_ord_compl_q, skb); + return false; + } + case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: { + struct ath10k_htt *htt = &ar->htt; + struct ath10k_htc *htc = &ar->htc; + struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid]; + u32 msg_word = __le32_to_cpu(*(__le32 *)resp); + int htt_credit_delta; + + htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word); + if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word)) + htt_credit_delta = -htt_credit_delta; + + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt credit update delta %d\n", + htt_credit_delta); + + if (htt->disable_tx_comp) { + spin_lock_bh(&htc->tx_lock); + ep->tx_credits += htt_credit_delta; + spin_unlock_bh(&htc->tx_lock); + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt credit total %d\n", + ep->tx_credits); + ep->ep_ops.ep_tx_credits(htc->ar); + } + break; + } + case HTT_T2H_MSG_TYPE_CHAN_CHANGE: { + u32 phymode = __le32_to_cpu(resp->chan_change.phymode); + u32 freq = __le32_to_cpu(resp->chan_change.freq); + + ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq); + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt chan change freq %u phymode %s\n", + freq, ath10k_wmi_phymode_str(phymode)); + break; + } + case HTT_T2H_MSG_TYPE_AGGR_CONF: + break; + case HTT_T2H_MSG_TYPE_TX_FETCH_IND: { + struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC); + + if (!tx_fetch_ind) { + ath10k_warn(ar, "failed to copy htt tx fetch ind\n"); + break; + } + skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind); + break; + } + case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: + ath10k_htt_rx_tx_fetch_confirm(ar, skb); + break; + case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: + ath10k_htt_rx_tx_mode_switch_ind(ar, skb); + break; + case HTT_T2H_MSG_TYPE_PEER_STATS: + ath10k_htt_fetch_peer_stats(ar, skb); + break; + case HTT_T2H_MSG_TYPE_EN_STATS: default: - ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n", - resp->hdr.msg_type); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", + ath10k_warn(ar, "htt event (%d) not handled\n", + resp->hdr.msg_type); + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); break; - }; + } + return true; +} +EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); - /* Free the indication buffer */ +void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, + struct sk_buff *skb) +{ + trace_ath10k_htt_pktlog(ar, skb->data, skb->len); dev_kfree_skb_any(skb); } +EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); + +static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget) +{ + struct sk_buff *skb; + + while (quota < budget) { + if (skb_queue_empty(&ar->htt.rx_msdus_q)) + break; + + skb = skb_dequeue(&ar->htt.rx_msdus_q); + if (!skb) + break; + ath10k_process_rx(ar, skb); + quota++; + } + + return quota; +} + +int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget) +{ + struct htt_resp *resp; + struct ath10k_htt *htt = &ar->htt; + struct sk_buff *skb; + bool release; + int quota; + + for (quota = 0; quota < budget; quota++) { + skb = skb_dequeue(&htt->rx_indication_head); + if (!skb) + break; + + resp = (struct htt_resp *)skb->data; + + release = ath10k_htt_rx_proc_rx_ind_hl(htt, + &resp->rx_ind_hl, + skb, + HTT_RX_PN_CHECK, + HTT_RX_NON_TKIP_MIC); + + if (release) + dev_kfree_skb_any(skb); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n", + skb_queue_len(&htt->rx_indication_head)); + } + return quota; +} +EXPORT_SYMBOL(ath10k_htt_rx_hl_indication); + +int ath10k_htt_txrx_compl_task(struct 
ath10k *ar, int budget) +{ + struct ath10k_htt *htt = &ar->htt; + struct htt_tx_done tx_done = {}; + struct sk_buff_head tx_ind_q; + struct sk_buff *skb; + unsigned long flags; + int quota = 0, done, ret; + bool resched_napi = false; + + __skb_queue_head_init(&tx_ind_q); + + /* Process pending frames before dequeuing more data + * from hardware. + */ + quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); + if (quota == budget) { + resched_napi = true; + goto exit; + } + + while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) { + spin_lock_bh(&htt->rx_ring.lock); + ret = ath10k_htt_rx_in_ord_ind(ar, skb); + spin_unlock_bh(&htt->rx_ring.lock); + + dev_kfree_skb_any(skb); + if (ret == -EIO) { + resched_napi = true; + goto exit; + } + } + + while (atomic_read(&htt->num_mpdus_ready)) { + ret = ath10k_htt_rx_handle_amsdu(htt); + if (ret == -EIO) { + resched_napi = true; + goto exit; + } + atomic_dec(&htt->num_mpdus_ready); + } + + /* Deliver received data after processing data from hardware */ + quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); + + /* From NAPI documentation: + * The napi poll() function may also process TX completions, in which + * case if it processes the entire TX ring then it should count that + * work as the rest of the budget. + */ + if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo)) + quota = budget; + + /* kfifo_get: called only within txrx_tasklet so it's neatly serialized. + * From kfifo_get() documentation: + * Note that with only one concurrent reader and one concurrent writer, + * you don't need extra locking to use these macro. + */ + while (kfifo_get(&htt->txdone_fifo, &tx_done)) + ath10k_txrx_tx_unref(htt, &tx_done); + + ath10k_mac_tx_push_pending(ar); + + spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags); + skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); + spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); + + while ((skb = __skb_dequeue(&tx_ind_q))) { + ath10k_htt_rx_tx_fetch_ind(ar, skb); + dev_kfree_skb_any(skb); + } + +exit: + ath10k_htt_rx_msdu_buff_replenish(htt); + /* In case of rx failure or more data to read, report budget + * to reschedule NAPI poll + */ + done = resched_napi ? 
budget : quota; + + return done; +} +EXPORT_SYMBOL(ath10k_htt_txrx_compl_task); + +static const struct ath10k_htt_rx_ops htt_rx_ops_32 = { + .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32, + .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32, + .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32, + .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32, + .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32, +}; + +static const struct ath10k_htt_rx_ops htt_rx_ops_64 = { + .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64, + .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64, + .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64, + .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64, + .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64, +}; + +static const struct ath10k_htt_rx_ops htt_rx_ops_hl = { + .htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl, +}; + +void ath10k_htt_set_rx_ops(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) + htt->rx_ops = &htt_rx_ops_hl; + else if (ar->hw_params.target_64bit) + htt->rx_ops = &htt_rx_ops_64; + else + htt->rx_ops = &htt_rx_ops_32; +} diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index ef79106db247..d6f1d85ba871 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1,20 +1,12 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ +#include <linux/export.h> #include <linux/etherdevice.h> #include "htt.h" #include "mac.h" @@ -22,160 +14,597 @@ #include "txrx.h" #include "debug.h" -void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) +static u8 ath10k_htt_tx_txq_calc_size(size_t count) { - htt->num_pending_tx--; - if (htt->num_pending_tx == htt->max_num_pending_tx - 1) - ieee80211_wake_queues(htt->ar->hw); + int exp; + int factor; + + exp = 0; + factor = count >> 7; + + while (factor >= 64 && exp < 4) { + factor >>= 3; + exp++; + } + + if (exp == 4) + return 0xff; + + if (count > 0) + factor = max(1, factor); + + return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) | + SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR); } -static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) +static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) { - spin_lock_bh(&htt->tx_lock); - __ath10k_htt_tx_dec_pending(htt); - spin_unlock_bh(&htt->tx_lock); + struct ath10k *ar = hw->priv; + struct ath10k_sta *arsta; + struct ath10k_vif *arvif = (void *)txq->vif->drv_priv; + unsigned long byte_cnt; + int idx; + u32 bit; + u16 peer_id; + u8 tid; + u8 count; + + lockdep_assert_held(&ar->htt.tx_lock); + + if (!ar->htt.tx_q_state.enabled) + return; + + if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL) + return; + + if (txq->sta) { + arsta = (void *)txq->sta->drv_priv; + peer_id = arsta->peer_id; + } else { + peer_id = arvif->peer_id; + } + + tid = txq->tid; + bit = BIT(peer_id % 32); + idx = peer_id / 32; + + ieee80211_txq_get_depth(txq, NULL, &byte_cnt); + count = ath10k_htt_tx_txq_calc_size(byte_cnt); + + if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || + unlikely(tid >= ar->htt.tx_q_state.num_tids)) { + ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n", + peer_id, tid); + return; + } + + ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count; + ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit; + ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? 
bit : 0; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n", + peer_id, tid, count); } -static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) +static void __ath10k_htt_tx_txq_sync(struct ath10k *ar) { - int ret = 0; + u32 seq; + size_t size; - spin_lock_bh(&htt->tx_lock); + lockdep_assert_held(&ar->htt.tx_lock); - if (htt->num_pending_tx >= htt->max_num_pending_tx) { - ret = -EBUSY; - goto exit; - } + if (!ar->htt.tx_q_state.enabled) + return; + + if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL) + return; + + seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq); + seq++; + ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n", + seq); + + size = sizeof(*ar->htt.tx_q_state.vaddr); + dma_sync_single_for_device(ar->dev, + ar->htt.tx_q_state.paddr, + size, + DMA_TO_DEVICE); +} + +void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + + spin_lock_bh(&ar->htt.tx_lock); + __ath10k_htt_tx_txq_recalc(hw, txq); + spin_unlock_bh(&ar->htt.tx_lock); +} + +void ath10k_htt_tx_txq_sync(struct ath10k *ar) +{ + spin_lock_bh(&ar->htt.tx_lock); + __ath10k_htt_tx_txq_sync(ar); + spin_unlock_bh(&ar->htt.tx_lock); +} + +void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + + spin_lock_bh(&ar->htt.tx_lock); + __ath10k_htt_tx_txq_recalc(hw, txq); + __ath10k_htt_tx_txq_sync(ar); + spin_unlock_bh(&ar->htt.tx_lock); +} + +void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) +{ + lockdep_assert_held(&htt->tx_lock); + + htt->num_pending_tx--; + if (htt->num_pending_tx == htt->max_num_pending_tx - 1) + ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); + + if (htt->num_pending_tx == 0) + wake_up(&htt->empty_tx_wq); +} + +int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) +{ + lockdep_assert_held(&htt->tx_lock); + + if (htt->num_pending_tx >= htt->max_num_pending_tx) + return -EBUSY; htt->num_pending_tx++; if (htt->num_pending_tx == htt->max_num_pending_tx) - ieee80211_stop_queues(htt->ar->hw); + ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); -exit: - spin_unlock_bh(&htt->tx_lock); - return ret; + return 0; } -int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt) +int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, + bool is_presp) { - int msdu_id; + struct ath10k *ar = htt->ar; + + lockdep_assert_held(&htt->tx_lock); + + if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres) + return 0; + + if (is_presp && + ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx) + return -EBUSY; + htt->num_pending_mgmt_tx++; + + return 0; +} + +void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt) +{ lockdep_assert_held(&htt->tx_lock); - msdu_id = find_first_zero_bit(htt->used_msdu_ids, - htt->max_num_pending_tx); - if (msdu_id == htt->max_num_pending_tx) - return -ENOBUFS; + if (!htt->ar->hw_params.max_probe_resp_desc_thres) + return; - ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id); - __set_bit(msdu_id, htt->used_msdu_ids); - return msdu_id; + htt->num_pending_mgmt_tx--; +} + +int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) +{ + struct ath10k *ar = htt->ar; + int ret; + + spin_lock_bh(&htt->tx_lock); + ret = idr_alloc(&htt->pending_tx, skb, 0, + htt->max_num_pending_tx, GFP_ATOMIC); + spin_unlock_bh(&htt->tx_lock); + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id 
%d\n", ret); + + return ret; } void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) { + struct ath10k *ar = htt->ar; + lockdep_assert_held(&htt->tx_lock); - if (!test_bit(msdu_id, htt->used_msdu_ids)) - ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id); + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id); - ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); - __clear_bit(msdu_id, htt->used_msdu_ids); + idr_remove(&htt->pending_tx, msdu_id); } -int ath10k_htt_tx_attach(struct ath10k_htt *htt) +static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt) { - u8 pipe; + struct ath10k *ar = htt->ar; + size_t size; - spin_lock_init(&htt->tx_lock); - init_waitqueue_head(&htt->empty_tx_wq); + if (!htt->txbuf.vaddr_txbuff_32) + return; - /* At the beginning free queue number should hint us the maximum - * queue length */ - pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id; - htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar, - pipe); + size = htt->txbuf.size; + dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32, + htt->txbuf.paddr); + htt->txbuf.vaddr_txbuff_32 = NULL; +} - ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n", - htt->max_num_pending_tx); +static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + + size = htt->max_num_pending_tx * + sizeof(struct ath10k_htt_txbuf_32); + + htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size, + &htt->txbuf.paddr, + GFP_KERNEL); + if (!htt->txbuf.vaddr_txbuff_32) + return -ENOMEM; + + htt->txbuf.size = size; + + return 0; +} + +static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + + if (!htt->txbuf.vaddr_txbuff_64) + return; + + size = htt->txbuf.size; + dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64, + htt->txbuf.paddr); + htt->txbuf.vaddr_txbuff_64 = NULL; +} + +static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + + size = htt->max_num_pending_tx * + sizeof(struct ath10k_htt_txbuf_64); - htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * - htt->max_num_pending_tx, GFP_KERNEL); - if (!htt->pending_tx) + htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size, + &htt->txbuf.paddr, + GFP_KERNEL); + if (!htt->txbuf.vaddr_txbuff_64) return -ENOMEM; - htt->used_msdu_ids = kzalloc(sizeof(unsigned long) * - BITS_TO_LONGS(htt->max_num_pending_tx), - GFP_KERNEL); - if (!htt->used_msdu_ids) { - kfree(htt->pending_tx); + htt->txbuf.size = size; + + return 0; +} + +static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt) +{ + size_t size; + + if (!htt->frag_desc.vaddr_desc_32) + return; + + size = htt->max_num_pending_tx * + sizeof(struct htt_msdu_ext_desc); + + dma_free_coherent(htt->ar->dev, + size, + htt->frag_desc.vaddr_desc_32, + htt->frag_desc.paddr); + + htt->frag_desc.vaddr_desc_32 = NULL; +} + +static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + + if (!ar->hw_params.continuous_frag_desc) + return 0; + + size = htt->max_num_pending_tx * + sizeof(struct htt_msdu_ext_desc); + htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size, + &htt->frag_desc.paddr, + GFP_KERNEL); + if (!htt->frag_desc.vaddr_desc_32) { + ath10k_err(ar, "failed to alloc fragment desc memory\n"); return -ENOMEM; } + htt->frag_desc.size = size; return 0; } -static 
void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) +static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt) { - struct sk_buff *txdesc; - int msdu_id; + size_t size; + + if (!htt->frag_desc.vaddr_desc_64) + return; + + size = htt->max_num_pending_tx * + sizeof(struct htt_msdu_ext_desc_64); - /* No locks needed. Called after communication with the device has - * been stopped. */ + dma_free_coherent(htt->ar->dev, + size, + htt->frag_desc.vaddr_desc_64, + htt->frag_desc.paddr); - for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) { - if (!test_bit(msdu_id, htt->used_msdu_ids)) - continue; + htt->frag_desc.vaddr_desc_64 = NULL; +} - txdesc = htt->pending_tx[msdu_id]; - if (!txdesc) - continue; +static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; - ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", - msdu_id); + if (!ar->hw_params.continuous_frag_desc) + return 0; - if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) - ATH10K_SKB_CB(txdesc)->htt.refcount = 1; + size = htt->max_num_pending_tx * + sizeof(struct htt_msdu_ext_desc_64); - ATH10K_SKB_CB(txdesc)->htt.discard = true; - ath10k_txrx_tx_unref(htt, txdesc); + htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size, + &htt->frag_desc.paddr, + GFP_KERNEL); + if (!htt->frag_desc.vaddr_desc_64) { + ath10k_err(ar, "failed to alloc fragment desc memory\n"); + return -ENOMEM; } + htt->frag_desc.size = size; + + return 0; } -void ath10k_htt_tx_detach(struct ath10k_htt *htt) +static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt) { - ath10k_htt_tx_cleanup_pending(htt); - kfree(htt->pending_tx); - kfree(htt->used_msdu_ids); - return; + struct ath10k *ar = htt->ar; + size_t size; + + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + return; + + size = sizeof(*htt->tx_q_state.vaddr); + + dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE); + kfree(htt->tx_q_state.vaddr); } -void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) +static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt) { - struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); - struct ath10k_htt *htt = ar->htt; + struct ath10k *ar = htt->ar; + size_t size; + int ret; - if (skb_cb->htt.is_conf) { - dev_kfree_skb_any(skb); - return; + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + return 0; + + htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS; + htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS; + htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES; + + size = sizeof(*htt->tx_q_state.vaddr); + htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL); + if (!htt->tx_q_state.vaddr) + return -ENOMEM; + + htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr, + size, DMA_TO_DEVICE); + ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr); + if (ret) { + ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret); + kfree(htt->tx_q_state.vaddr); + return -EIO; } - if (skb_cb->is_aborted) { - skb_cb->htt.discard = true; + return 0; +} + +static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt) +{ + WARN_ON(!kfifo_is_empty(&htt->txdone_fifo)); + kfifo_free(&htt->txdone_fifo); +} - /* if the skbuff is aborted we need to make sure we'll free up - * the tx resources, we can't simply run tx_unref() 2 times - * because if htt tx completion came in earlier we'd access - * unallocated memory */ - if (skb_cb->htt.refcount > 1) - 
skb_cb->htt.refcount = 1; +static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt) +{ + int ret; + size_t size; + + size = roundup_pow_of_two(htt->max_num_pending_tx); + ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL); + return ret; +} + +static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + int ret; + + ret = ath10k_htt_alloc_txbuff(htt); + if (ret) { + ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret); + return ret; + } + + ret = ath10k_htt_alloc_frag_desc(htt); + if (ret) { + ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret); + goto free_txbuf; + } + + ret = ath10k_htt_tx_alloc_txq(htt); + if (ret) { + ath10k_err(ar, "failed to alloc txq: %d\n", ret); + goto free_frag_desc; + } + + ret = ath10k_htt_tx_alloc_txdone_fifo(htt); + if (ret) { + ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret); + goto free_txq; + } + + return 0; + +free_txq: + ath10k_htt_tx_free_txq(htt); + +free_frag_desc: + ath10k_htt_free_frag_desc(htt); + +free_txbuf: + ath10k_htt_free_txbuff(htt); + + return ret; +} + +int ath10k_htt_tx_start(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", + htt->max_num_pending_tx); + + spin_lock_init(&htt->tx_lock); + idr_init(&htt->pending_tx); + + if (htt->tx_mem_allocated) + return 0; + + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) + return 0; + + ret = ath10k_htt_tx_alloc_buf(htt); + if (ret) + goto free_idr_pending_tx; + + htt->tx_mem_allocated = true; + + return 0; + +free_idr_pending_tx: + idr_destroy(&htt->pending_tx); + + return ret; +} + +static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) +{ + struct ath10k *ar = ctx; + struct ath10k_htt *htt = &ar->htt; + struct htt_tx_done tx_done = {}; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id); + + tx_done.msdu_id = msdu_id; + tx_done.status = HTT_TX_COMPL_STATE_DISCARD; + + ath10k_txrx_tx_unref(htt, &tx_done); + + return 0; +} + +void ath10k_htt_tx_destroy(struct ath10k_htt *htt) +{ + if (!htt->tx_mem_allocated) + return; + + ath10k_htt_free_txbuff(htt); + ath10k_htt_tx_free_txq(htt); + ath10k_htt_free_frag_desc(htt); + ath10k_htt_tx_free_txdone_fifo(htt); + htt->tx_mem_allocated = false; +} + +static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt) +{ + ath10k_htc_stop_hl(htt->ar); + idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); +} + +void ath10k_htt_tx_stop(struct ath10k_htt *htt) +{ + ath10k_htt_flush_tx_queue(htt); + idr_destroy(&htt->pending_tx); +} + +void ath10k_htt_tx_free(struct ath10k_htt *htt) +{ + ath10k_htt_tx_stop(htt); + ath10k_htt_tx_destroy(htt); +} + +void ath10k_htt_op_ep_tx_credits(struct ath10k *ar) +{ + queue_work(ar->workqueue, &ar->bundle_tx_work); +} + +void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_htt *htt = &ar->htt; + struct htt_tx_done tx_done = {}; + struct htt_cmd_hdr *htt_hdr; + struct htt_data_tx_desc *desc_hdr = NULL; + u16 flags1 = 0; + u8 msg_type = 0; + + if (htt->disable_tx_comp) { + htt_hdr = (struct htt_cmd_hdr *)skb->data; + msg_type = htt_hdr->msg_type; + + if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) { + desc_hdr = (struct htt_data_tx_desc *) + (skb->data + sizeof(*htt_hdr)); + flags1 = __le16_to_cpu(desc_hdr->flags1); + skb_pull(skb, sizeof(struct htt_cmd_hdr)); + skb_pull(skb, sizeof(struct htt_data_tx_desc)); + } } - ath10k_txrx_tx_unref(htt, skb); + dev_kfree_skb_any(skb); + + if 
((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM)) + return; + + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt tx complete msdu id:%u ,flags1:%x\n", + __le16_to_cpu(desc_hdr->id), flags1); + + if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE) + return; + + tx_done.status = HTT_TX_COMPL_STATE_ACK; + tx_done.msdu_id = __le16_to_cpu(desc_hdr->id); + ath10k_txrx_tx_unref(&ar->htt, &tx_done); } +void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); +} +EXPORT_SYMBOL(ath10k_htt_hif_tx_complete); + int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; int len = 0; @@ -184,7 +613,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) len += sizeof(cmd->hdr); len += sizeof(cmd->ver_req); - skb = ath10k_htc_alloc_skb(len); + skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -192,10 +621,176 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ; - ATH10K_SKB_CB(skb)->htt.is_conf = true; + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask, + u64 cookie) +{ + struct ath10k *ar = htt->ar; + struct htt_stats_req *req; + struct sk_buff *skb; + struct htt_cmd *cmd; + int len = 0, ret; + + len += sizeof(cmd->hdr); + len += sizeof(cmd->stats_req); + + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ; + + req = &cmd->stats_req; + + memset(req, 0, sizeof(*req)); + + /* currently we support only max 24 bit masks so no need to worry + * about endian support + */ + memcpy(req->upload_types, &mask, 3); + memcpy(req->reset_types, &reset_mask, 3); + req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID; + req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff); + req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + ath10k_warn(ar, "failed to send htt type stats request: %d", + ret); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + struct sk_buff *skb; + struct htt_cmd *cmd; + struct htt_frag_desc_bank_cfg32 *cfg; + int ret, size; + u8 info; + + if (!ar->hw_params.continuous_frag_desc) + return 0; + + if (!htt->frag_desc.paddr) { + ath10k_warn(ar, "invalid frag desc memory\n"); + return -EINVAL; + } + + size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32); + skb = ath10k_htc_alloc_skb(ar, size); + if (!skb) + return -ENOMEM; + + skb_put(skb, size); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; + + info = 0; + info |= SM(htt->tx_q_state.type, + HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE); + + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID; + + cfg = &cmd->frag_desc_bank_cfg32; + cfg->info = info; + cfg->num_banks = 1; + cfg->desc_size = sizeof(struct htt_msdu_ext_desc); + cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr); + cfg->bank_id[0].bank_min_id = 0; + cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - + 1); + 
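+ /* Describe the host tx queue state area used for peer flow control;
+ * the firmware only looks at it when ATH10K_FW_FEATURE_PEER_FLOW_CONTROL
+ * sets the Q_STATE_VALID flag above.
+ */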
+ cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr); + cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers); + cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids); + cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE; + cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n"); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n", + ret); + dev_kfree_skb_any(skb); + return ret; + } - ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb); + return 0; +} + +static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + struct sk_buff *skb; + struct htt_cmd *cmd; + struct htt_frag_desc_bank_cfg64 *cfg; + int ret, size; + u8 info; + + if (!ar->hw_params.continuous_frag_desc) + return 0; + + if (!htt->frag_desc.paddr) { + ath10k_warn(ar, "invalid frag desc memory\n"); + return -EINVAL; + } + + size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64); + skb = ath10k_htc_alloc_skb(ar, size); + if (!skb) + return -ENOMEM; + + skb_put(skb, size); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; + + info = 0; + info |= SM(htt->tx_q_state.type, + HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE); + + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID; + + cfg = &cmd->frag_desc_bank_cfg64; + cfg->info = info; + cfg->num_banks = 1; + cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64); + cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr); + cfg->bank_id[0].bank_min_id = 0; + cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - + 1); + + cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr); + cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers); + cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids); + cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE; + cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n"); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { + ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n", + ret); dev_kfree_skb_any(skb); return ret; } @@ -203,11 +798,25 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) return 0; } -int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) +static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, + struct htt_rx_ring_setup_ring32 *rx_ring) +{ + ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets); +} + +static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, + struct htt_rx_ring_setup_ring64 *rx_ring) +{ + ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets); +} + +static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; + struct ath10k_hw_params *hw = &ar->hw_params; struct sk_buff *skb; struct htt_cmd *cmd; - struct htt_rx_ring_setup_ring *ring; + struct htt_rx_ring_setup_ring32 *ring; const int num_rx_ring = 1; u16 flags; u32 fw_idx; @@ -221,19 +830,19 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); - len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr) + len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr) + 
(sizeof(*ring) * num_rx_ring); - skb = ath10k_htc_alloc_skb(len); + skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; - ring = &cmd->rx_setup.rings[0]; + ring = &cmd->rx_setup_32.rings[0]; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; - cmd->rx_setup.hdr.num_rings = 1; + cmd->rx_setup_32.hdr.num_rings = 1; /* FIXME: do we need all of this? */ flags = 0; @@ -264,24 +873,78 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) ring->flags = __cpu_to_le16(flags); ring->fw_idx_init_val = __cpu_to_le16(fw_idx); -#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4) + ath10k_htt_fill_rx_desc_offset_32(hw, ring); + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } - ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); - ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); - ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); - ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); - ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); - ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); - ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); - ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); - ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); - ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); + return 0; +} -#undef desc_offset +static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + struct ath10k_hw_params *hw = &ar->hw_params; + struct sk_buff *skb; + struct htt_cmd *cmd; + struct htt_rx_ring_setup_ring64 *ring; + const int num_rx_ring = 1; + u16 flags; + u32 fw_idx; + int len; + int ret; - ATH10K_SKB_CB(skb)->htt.is_conf = true; + /* HW expects the buffer to be an integral number of 4-byte + * "words" + */ + BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); + BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); + + len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr) + + (sizeof(*ring) * num_rx_ring); + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + + cmd = (struct htt_cmd *)skb->data; + ring = &cmd->rx_setup_64.rings[0]; - ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb); + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; + cmd->rx_setup_64.hdr.num_rings = 1; + + flags = 0; + flags |= HTT_RX_RING_FLAGS_MAC80211_HDR; + flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; + flags |= HTT_RX_RING_FLAGS_PPDU_START; + flags |= HTT_RX_RING_FLAGS_PPDU_END; + flags |= HTT_RX_RING_FLAGS_MPDU_START; + flags |= HTT_RX_RING_FLAGS_MPDU_END; + flags |= HTT_RX_RING_FLAGS_MSDU_START; + flags |= HTT_RX_RING_FLAGS_MSDU_END; + flags |= HTT_RX_RING_FLAGS_RX_ATTENTION; + flags |= HTT_RX_RING_FLAGS_FRAG_INFO; + flags |= HTT_RX_RING_FLAGS_UNICAST_RX; + flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; + flags |= HTT_RX_RING_FLAGS_CTRL_RX; + flags |= HTT_RX_RING_FLAGS_MGMT_RX; + flags |= HTT_RX_RING_FLAGS_NULL_RX; + flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX; + + fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); + + ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr); + ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr); + ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); + ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); + ring->flags = __cpu_to_le16(flags); + ring->fw_idx_init_val = 
__cpu_to_le16(fw_idx); + + ath10k_htt_fill_rx_desc_offset_64(hw, ring); + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; @@ -290,47 +953,288 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) return 0; } +static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + struct sk_buff *skb; + struct htt_cmd *cmd; + struct htt_rx_ring_setup_ring32 *ring; + const int num_rx_ring = 1; + u16 flags; + int len; + int ret; + + /* + * the HW expects the buffer to be an integral number of 4-byte + * "words" + */ + BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); + BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); + + len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr) + + (sizeof(*ring) * num_rx_ring); + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + + cmd = (struct htt_cmd *)skb->data; + ring = &cmd->rx_setup_32.rings[0]; + + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; + cmd->rx_setup_32.hdr.num_rings = 1; + + flags = 0; + flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; + flags |= HTT_RX_RING_FLAGS_UNICAST_RX; + flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; + + memset(ring, 0, sizeof(*ring)); + ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN); + ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); + ring->flags = __cpu_to_le16(flags); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt, + u8 max_subfrms_ampdu, + u8 max_subfrms_amsdu) +{ + struct ath10k *ar = htt->ar; + struct htt_aggr_conf *aggr_conf; + struct sk_buff *skb; + struct htt_cmd *cmd; + int len; + int ret; + + /* Firmware defaults are: amsdu = 3 and ampdu = 64 */ + + if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64) + return -EINVAL; + + if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31) + return -EINVAL; + + len = sizeof(cmd->hdr); + len += sizeof(cmd->aggr_conf); + + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG; + + aggr_conf = &cmd->aggr_conf; + aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu; + aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", + aggr_conf->max_num_amsdu_subframes, + aggr_conf->max_num_ampdu_subframes); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt, + u8 max_subfrms_ampdu, + u8 max_subfrms_amsdu) +{ + struct ath10k *ar = htt->ar; + struct htt_aggr_conf_v2 *aggr_conf; + struct sk_buff *skb; + struct htt_cmd *cmd; + int len; + int ret; + + /* Firmware defaults are: amsdu = 3 and ampdu = 64 */ + + if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64) + return -EINVAL; + + if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31) + return -EINVAL; + + len = sizeof(cmd->hdr); + len += sizeof(cmd->aggr_conf_v2); + + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG; + + aggr_conf = &cmd->aggr_conf_v2; + aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu; + aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu; + + 
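/* The v2 message carries the same subframe limits as the 32-bit
+ * variant; only the htt_aggr_conf_v2 descriptor layout differs.
+ */
+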
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", + aggr_conf->max_num_amsdu_subframes, + aggr_conf->max_num_ampdu_subframes); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +int ath10k_htt_tx_fetch_resp(struct ath10k *ar, + __le32 token, + __le16 fetch_seq_num, + struct htt_tx_fetch_record *records, + size_t num_records) +{ + struct sk_buff *skb; + struct htt_cmd *cmd; + const u16 resp_id = 0; + int len = 0; + int ret; + + /* Response IDs are echo-ed back only for host driver convenience + * purposes. They aren't used for anything in the driver yet so use 0. + */ + + len += sizeof(cmd->hdr); + len += sizeof(cmd->tx_fetch_resp); + len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records; + + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP; + cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id); + cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num; + cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records); + cmd->tx_fetch_resp.token = token; + + memcpy(cmd->tx_fetch_resp.records, records, + sizeof(records[0]) * num_records); + + ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb); + if (ret) { + ath10k_warn(ar, "failed to submit htc command: %d\n", ret); + goto err_free_skb; + } + + return 0; + +err_free_skb: + dev_kfree_skb_any(skb); + + return ret; +} + +static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + struct ath10k_vif *arvif; + + if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + return ar->scan.vdev_id; + } else if (cb->vif) { + arvif = (void *)cb->vif->drv_priv; + return arvif->vdev_id; + } else if (ar->monitor_started) { + return ar->monitor_vdev_id; + } else { + return 0; + } +} + +static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + + if (!is_eth && ieee80211_is_mgmt(hdr->frame_control)) + return HTT_DATA_TX_EXT_TID_MGMT; + else if (cb->flags & ATH10K_SKB_F_QOS) + return skb->priority & IEEE80211_QOS_CTL_TID_MASK; + else + return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; +} + int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { - struct device *dev = htt->ar->dev; - struct ath10k_skb_cb *skb_cb; + struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; - u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); + u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); int len = 0; int msdu_id = -1; int res; - - - res = ath10k_htt_tx_inc_pending(htt); - if (res) - return res; + const u8 *peer_addr; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); - txdesc = ath10k_htc_alloc_skb(len); - if (!txdesc) { - res = -ENOMEM; + res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); + if (res < 0) goto err; + + msdu_id = res; + + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + peer_addr = hdr->addr1; + if (is_multicast_ether_addr(peer_addr)) { + skb_put(msdu, sizeof(struct ieee80211_mmie_16)); + } 
else { + if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP || + skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) + skb_put(msdu, IEEE80211_GCMP_MIC_LEN); + else + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } } - spin_lock_bh(&htt->tx_lock); - msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); - if (msdu_id < 0) { - spin_unlock_bh(&htt->tx_lock); - res = msdu_id; - goto err; + txdesc = ath10k_htc_alloc_skb(ar, len); + if (!txdesc) { + res = -ENOMEM; + goto err_free_msdu_id; } - htt->pending_tx[msdu_id] = txdesc; - spin_unlock_bh(&htt->tx_lock); - res = ath10k_skb_map(dev, msdu); - if (res) - goto err; + skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, + DMA_TO_DEVICE); + res = dma_mapping_error(dev, skb_cb->paddr); + if (res) { + res = -EIO; + goto err_free_txdesc; + } skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; + memset(cmd, 0, len); + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); @@ -339,172 +1243,600 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); - /* refcount is decremented by HTC and HTT completions until it reaches - * zero and is freed */ - skb_cb = ATH10K_SKB_CB(txdesc); - skb_cb->htt.msdu_id = msdu_id; - skb_cb->htt.refcount = 2; - skb_cb->htt.msdu = msdu; - - res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc); + res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) - goto err; + goto err_unmap_msdu; return 0; +err_unmap_msdu: + if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) + dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); +err_free_txdesc: + dev_kfree_skb_any(txdesc); +err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); + ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: - ath10k_skb_unmap(dev, msdu); + return res; +} + +#define HTT_TX_HL_NEEDED_HEADROOM \ + (unsigned int)(sizeof(struct htt_cmd_hdr) + \ + sizeof(struct htt_data_tx_desc) + \ + sizeof(struct ath10k_htc_hdr)) + +static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) +{ + struct ath10k *ar = htt->ar; + int res, data_len; + struct htt_cmd_hdr *cmd_hdr; + struct htt_data_tx_desc *tx_desc; + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); + struct sk_buff *tmp_skb; + bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); + u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); + u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); + u8 flags0 = 0; + u16 flags1 = 0; + u16 msdu_id = 0; + + if (!is_eth) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; + + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } + } + + data_len = msdu->len; + + switch (txmode) { + case ATH10K_HW_TXRX_RAW: + case ATH10K_HW_TXRX_NATIVE_WIFI: + flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; + fallthrough; + case ATH10K_HW_TXRX_ETHERNET: + flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + break; + case ATH10K_HW_TXRX_MGMT: + flags0 |= SM(ATH10K_HW_TXRX_MGMT, + HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; + + if (htt->disable_tx_comp) + flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE; + break; + } + + if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) + 
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; + + flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); + flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); + if (msdu->ip_summed == CHECKSUM_PARTIAL && + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; + flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; + } + + /* Prepend the HTT header and TX desc struct to the data message + * and realloc the skb if it does not have enough headroom. + */ + if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) { + tmp_skb = msdu; + + ath10k_dbg(htt->ar, ATH10K_DBG_HTT, + "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n", + skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM); + msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM); + kfree_skb(tmp_skb); + if (!msdu) { + ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n"); + res = -ENOMEM; + goto out; + } + } - if (txdesc) - dev_kfree_skb_any(txdesc); - if (msdu_id >= 0) { - spin_lock_bh(&htt->tx_lock); - htt->pending_tx[msdu_id] = NULL; - ath10k_htt_tx_free_msdu_id(htt, msdu_id); - spin_unlock_bh(&htt->tx_lock); + if (ar->bus_param.hl_msdu_ids) { + flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; + res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); + if (res < 0) { + ath10k_err(ar, "msdu_id allocation failed %d\n", res); + goto out; + } + msdu_id = res; } - ath10k_htt_tx_dec_pending(htt); + + /* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by + * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase + * reference by one to avoid a use-after-free case and a double + * free. + */ + skb_get(msdu); + + skb_push(msdu, sizeof(*cmd_hdr)); + skb_push(msdu, sizeof(*tx_desc)); + cmd_hdr = (struct htt_cmd_hdr *)msdu->data; + tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr)); + + cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM; + tx_desc->flags0 = flags0; + tx_desc->flags1 = __cpu_to_le16(flags1); + tx_desc->len = __cpu_to_le16(data_len); + tx_desc->id = __cpu_to_le16(msdu_id); + tx_desc->frags_paddr = 0; /* always zero */ + /* Initialize peer_id to INVALID_PEER because this is NOT + * Reinjection path + */ + tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID); + + res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu); + +out: return res; } -int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) +static int ath10k_htt_tx_32(struct ath10k_htt *htt, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) { - struct device *dev = htt->ar->dev; - struct htt_cmd *cmd; - struct htt_data_tx_desc_frag *tx_frags; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; - struct ath10k_skb_cb *skb_cb; - struct sk_buff *txdesc = NULL; - struct sk_buff *txfrag = NULL; - u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; - u8 tid; - int prefetch_len, desc_len, frag_len; - dma_addr_t frags_paddr; - int msdu_id = -1; + struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); + struct ath10k_hif_sg_item sg_items[2]; + struct ath10k_htt_txbuf_32 *txbuf; + struct htt_data_tx_desc_frag *frags; + bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); + u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); + u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); + int prefetch_len; int res; - u8 flags0; - u16 flags1; + u8 flags0 = 0; + u16 msdu_id, flags1 = 0; + u16 freq = 0; + u32 frags_paddr = 0; + u32 txbuf_paddr; + struct 
htt_msdu_ext_desc *ext_desc = NULL; + struct htt_msdu_ext_desc *ext_desc_t = NULL; + + res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); + if (res < 0) + goto err; - res = ath10k_htt_tx_inc_pending(htt); - if (res) - return res; + msdu_id = res; prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); - desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len; - frag_len = sizeof(*tx_frags) * 2; + txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id; + txbuf_paddr = htt->txbuf.paddr + + (sizeof(struct ath10k_htt_txbuf_32) * msdu_id); + + if (!is_eth) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; + + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) && + txmode == ATH10K_HW_TXRX_RAW && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } + } - txdesc = ath10k_htc_alloc_skb(desc_len); - if (!txdesc) { - res = -ENOMEM; - goto err; + skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, + DMA_TO_DEVICE); + res = dma_mapping_error(dev, skb_cb->paddr); + if (res) { + res = -EIO; + goto err_free_msdu_id; } - txfrag = dev_alloc_skb(frag_len); - if (!txfrag) { - res = -ENOMEM; - goto err; + if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) + freq = ar->scan.roc_freq; + + switch (txmode) { + case ATH10K_HW_TXRX_RAW: + case ATH10K_HW_TXRX_NATIVE_WIFI: + flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; + fallthrough; + case ATH10K_HW_TXRX_ETHERNET: + if (ar->hw_params.continuous_frag_desc) { + ext_desc_t = htt->frag_desc.vaddr_desc_32; + memset(&ext_desc_t[msdu_id], 0, + sizeof(struct htt_msdu_ext_desc)); + frags = (struct htt_data_tx_desc_frag *) + &ext_desc_t[msdu_id].frags; + ext_desc = &ext_desc_t[msdu_id]; + frags[0].tword_addr.paddr_lo = + __cpu_to_le32(skb_cb->paddr); + frags[0].tword_addr.paddr_hi = 0; + frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); + + frags_paddr = htt->frag_desc.paddr + + (sizeof(struct htt_msdu_ext_desc) * msdu_id); + } else { + frags = txbuf->frags; + frags[0].dword_addr.paddr = + __cpu_to_le32(skb_cb->paddr); + frags[0].dword_addr.len = __cpu_to_le32(msdu->len); + frags[1].dword_addr.paddr = 0; + frags[1].dword_addr.len = 0; + + frags_paddr = txbuf_paddr; + } + flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + break; + case ATH10K_HW_TXRX_MGMT: + flags0 |= SM(ATH10K_HW_TXRX_MGMT, + HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; + + frags_paddr = skb_cb->paddr; + break; } - if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) { - ath10k_warn("htt alignment check failed. dropping packet.\n"); - res = -EIO; - goto err; + /* Normally all commands go through HTC which manages tx credits for + * each endpoint and notifies when tx is completed. + * + * HTT endpoint is creditless so there's no need to care about HTC + * flags. In that case it is trivial to fill the HTC header here. + * + * MSDU transmission is considered completed upon HTT event. This + * implies no relevant resources can be freed until after the event is + * received. That's why HTC tx completion handler itself is ignored by + * setting NULL to transfer_context for all sg items. + * + * There is simply no point in pushing HTT TX_FRM through HTC tx path + * as it's a waste of resources. 
By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance.
+ */
+
+ txbuf->htc_hdr.eid = htt->eid;
+ txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx) +
+ prefetch_len);
+ txbuf->htc_hdr.flags = 0;
+
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (ar->hw_params.continuous_frag_desc)
+ ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
}

- spin_lock_bh(&htt->tx_lock);
- msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
- if (msdu_id < 0) {
- spin_unlock_bh(&htt->tx_lock);
- res = msdu_id;
- goto err;
+ /* Prevent firmware from sending up tx inspection requests. There's
+ * nothing ath10k can do with frames requested for inspection so force
+ * it to simply rely on a regular tx completion with discard status.
+ */
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+ txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ txbuf->cmd_tx.flags0 = flags0;
+ txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+ txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ if (ath10k_mac_tx_frm_has_freq(ar)) {
+ txbuf->cmd_tx.offchan_tx.peerid =
+ __cpu_to_le16(HTT_INVALID_PEERID);
+ txbuf->cmd_tx.offchan_tx.freq =
+ __cpu_to_le16(freq);
+ } else {
+ txbuf->cmd_tx.peerid =
+ __cpu_to_le32(HTT_INVALID_PEERID);
}

- htt->pending_tx[msdu_id] = txdesc;
- spin_unlock_bh(&htt->tx_lock);
- res = ath10k_skb_map(dev, msdu);
+ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
+ flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+ &skb_cb->paddr, vdev_id, tid, freq);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ msdu->data, msdu->len);
+ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
+ sg_items[0].transfer_id = 0;
+ sg_items[0].transfer_context = NULL;
+ sg_items[0].vaddr = &txbuf->htc_hdr;
+ sg_items[0].paddr = txbuf_paddr +
+ sizeof(txbuf->frags);
+ sg_items[0].len = sizeof(txbuf->htc_hdr) +
+ sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx);
+
+ sg_items[1].transfer_id = 0;
+ sg_items[1].transfer_context = NULL;
+ sg_items[1].vaddr = msdu->data;
+ sg_items[1].paddr = skb_cb->paddr;
+ sg_items[1].len = prefetch_len;
+
+ res = ath10k_hif_tx_sg(htt->ar,
+ htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+ sg_items, ARRAY_SIZE(sg_items));
 if (res)
- goto err;
+ goto err_unmap_msdu;

- /* tx fragment list must be terminated with zero-entry */
- skb_put(txfrag, frag_len);
- tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
- tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
- tx_frags[0].len = __cpu_to_le32(msdu->len);
- tx_frags[1].paddr = __cpu_to_le32(0);
- tx_frags[1].len = __cpu_to_le32(0);
+ return 0;

- res = ath10k_skb_map(dev, txfrag);
- if (res)
+err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+
spin_unlock_bh(&htt->tx_lock); +err: + return res; +} + +static int ath10k_htt_tx_64(struct ath10k_htt *htt, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) +{ + struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); + struct ath10k_hif_sg_item sg_items[2]; + struct ath10k_htt_txbuf_64 *txbuf; + struct htt_data_tx_desc_frag *frags; + bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); + u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); + u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); + int prefetch_len; + int res; + u8 flags0 = 0; + u16 msdu_id, flags1 = 0; + u16 freq = 0; + dma_addr_t frags_paddr = 0; + dma_addr_t txbuf_paddr; + struct htt_msdu_ext_desc_64 *ext_desc = NULL; + struct htt_msdu_ext_desc_64 *ext_desc_t = NULL; + + res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); + if (res < 0) goto err; - ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n", - (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr, - (unsigned long long) ATH10K_SKB_CB(msdu)->paddr); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ", - txfrag->data, frag_len); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ", - msdu->data, msdu->len); + msdu_id = res; - skb_put(txdesc, desc_len); - cmd = (struct htt_cmd *)txdesc->data; - memset(cmd, 0, desc_len); + prefetch_len = min(htt->prefetch_len, msdu->len); + prefetch_len = roundup(prefetch_len, 4); - tid = ATH10K_SKB_CB(msdu)->htt.tid; + txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id; + txbuf_paddr = htt->txbuf.paddr + + (sizeof(struct ath10k_htt_txbuf_64) * msdu_id); + + if (!is_eth) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; + + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) && + txmode == ATH10K_HW_TXRX_RAW && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } + } - ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid); + skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, + DMA_TO_DEVICE); + res = dma_mapping_error(dev, skb_cb->paddr); + if (res) { + res = -EIO; + goto err_free_msdu_id; + } - flags0 = 0; - if (!ieee80211_has_protected(hdr->frame_control)) - flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; - flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; - flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, - HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) + freq = ar->scan.roc_freq; + + switch (txmode) { + case ATH10K_HW_TXRX_RAW: + case ATH10K_HW_TXRX_NATIVE_WIFI: + flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; + fallthrough; + case ATH10K_HW_TXRX_ETHERNET: + if (ar->hw_params.continuous_frag_desc) { + ext_desc_t = htt->frag_desc.vaddr_desc_64; + memset(&ext_desc_t[msdu_id], 0, + sizeof(struct htt_msdu_ext_desc_64)); + frags = (struct htt_data_tx_desc_frag *) + &ext_desc_t[msdu_id].frags; + ext_desc = &ext_desc_t[msdu_id]; + frags[0].tword_addr.paddr_lo = + __cpu_to_le32(skb_cb->paddr); + frags[0].tword_addr.paddr_hi = + __cpu_to_le16(upper_32_bits(skb_cb->paddr)); + frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); + + frags_paddr = htt->frag_desc.paddr + + (sizeof(struct htt_msdu_ext_desc_64) * msdu_id); + } else { + frags = txbuf->frags; + 
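/* Without continuous fragment descriptors the fragment list is
+ * embedded in the tx buffer itself, terminated by the zeroed entry
+ * below.
+ */
+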
frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi =
+ __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+ frags[1].tword_addr.paddr_lo = 0;
+ frags[1].tword_addr.paddr_hi = 0;
+ frags[1].tword_addr.len_16 = 0;
+ }
+ flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ frags_paddr = skb_cb->paddr;
+ break;
+ }

- flags1 = 0;
- flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
- flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ /* Normally all commands go through HTC which manages tx credits for
+ * each endpoint and notifies when tx is completed.
+ *
+ * HTT endpoint is creditless so there's no need to care about HTC
+ * flags. In that case it is trivial to fill the HTC header here.
+ *
+ * MSDU transmission is considered completed upon HTT event. This
+ * implies no relevant resources can be freed until after the event is
+ * received. That's why HTC tx completion handler itself is ignored by
+ * setting NULL to transfer_context for all sg items.
+ *
+ * There is simply no point in pushing HTT TX_FRM through HTC tx path
+ * as it's a waste of resources. By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance.
+ */

- frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
+ txbuf->htc_hdr.eid = htt->eid;
+ txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx) +
+ prefetch_len);
+ txbuf->htc_hdr.flags = 0;

- cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
- cmd->data_tx.flags0 = flags0;
- cmd->data_tx.flags1 = __cpu_to_le16(flags1);
- cmd->data_tx.len = __cpu_to_le16(msdu->len);
- cmd->data_tx.id = __cpu_to_le16(msdu_id);
- cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
- cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

- memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (ar->hw_params.continuous_frag_desc) {
+ memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
+ ext_desc->tso_flag[3] |=
+ __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
+ }
+ }

- /* refcount is decremented by HTC and HTT completions until it reaches
- * zero and is freed */
- skb_cb = ATH10K_SKB_CB(txdesc);
- skb_cb->htt.msdu_id = msdu_id;
- skb_cb->htt.refcount = 2;
- skb_cb->htt.txfrag = txfrag;
- skb_cb->htt.msdu = msdu;
+ /* Prevent firmware from sending up tx inspection requests. There's
+ * nothing ath10k can do with frames requested for inspection so force
+ * it to simply rely on a regular tx completion with discard status.
+ */ + flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; + + txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; + txbuf->cmd_tx.flags0 = flags0; + txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); + txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); + txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); + + /* fill fragment descriptor */ + txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr); + if (ath10k_mac_tx_frm_has_freq(ar)) { + txbuf->cmd_tx.offchan_tx.peerid = + __cpu_to_le16(HTT_INVALID_PEERID); + txbuf->cmd_tx.offchan_tx.freq = + __cpu_to_le16(freq); + } else { + txbuf->cmd_tx.peerid = + __cpu_to_le32(HTT_INVALID_PEERID); + } - res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc); + trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n", + flags0, flags1, msdu->len, msdu_id, &frags_paddr, + &skb_cb->paddr, vdev_id, tid, freq); + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", + msdu->data, msdu->len); + trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); + trace_ath10k_tx_payload(ar, msdu->data, msdu->len); + + sg_items[0].transfer_id = 0; + sg_items[0].transfer_context = NULL; + sg_items[0].vaddr = &txbuf->htc_hdr; + sg_items[0].paddr = txbuf_paddr + + sizeof(txbuf->frags); + sg_items[0].len = sizeof(txbuf->htc_hdr) + + sizeof(txbuf->cmd_hdr) + + sizeof(txbuf->cmd_tx); + + sg_items[1].transfer_id = 0; + sg_items[1].transfer_context = NULL; + sg_items[1].vaddr = msdu->data; + sg_items[1].paddr = skb_cb->paddr; + sg_items[1].len = prefetch_len; + + res = ath10k_hif_tx_sg(htt->ar, + htt->ar->htc.endpoint[htt->eid].ul_pipe_id, + sg_items, ARRAY_SIZE(sg_items)); if (res) - goto err; + goto err_unmap_msdu; return 0; + +err_unmap_msdu: + dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); +err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); + ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: - if (txfrag) - ath10k_skb_unmap(dev, txfrag); - if (txdesc) - dev_kfree_skb_any(txdesc); - if (txfrag) - dev_kfree_skb_any(txfrag); - if (msdu_id >= 0) { - spin_lock_bh(&htt->tx_lock); - htt->pending_tx[msdu_id] = NULL; - ath10k_htt_tx_free_msdu_id(htt, msdu_id); - spin_unlock_bh(&htt->tx_lock); - } - ath10k_htt_tx_dec_pending(htt); - ath10k_skb_unmap(dev, msdu); return res; } + +static const struct ath10k_htt_tx_ops htt_tx_ops_32 = { + .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32, + .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32, + .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32, + .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32, + .htt_tx = ath10k_htt_tx_32, + .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32, + .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32, + .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32, +}; + +static const struct ath10k_htt_tx_ops htt_tx_ops_64 = { + .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64, + .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64, + .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64, + .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64, + .htt_tx = ath10k_htt_tx_64, + .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64, + .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64, + .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2, +}; + +static const struct ath10k_htt_tx_ops htt_tx_ops_hl = { + .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl, + 
.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32, + .htt_tx = ath10k_htt_tx_hl, + .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32, + .htt_flush_tx = ath10k_htt_flush_tx_queue, +}; + +void ath10k_htt_set_tx_ops(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) + htt->tx_ops = &htt_tx_ops_hl; + else if (ar->hw_params.target_64bit) + htt->tx_ops = &htt_tx_ops_64; + else + htt->tx_ops = &htt_tx_ops_32; +} diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c new file mode 100644 index 000000000000..59b6cebfdd8f --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -0,0 +1,1157 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/bitfield.h> +#include "core.h" +#include "hw.h" +#include "hif.h" +#include "wmi-ops.h" +#include "bmi.h" +#include "rx_desc.h" + +const struct ath10k_hw_regs qca988x_regs = { + .rtc_soc_base_address = 0x00004000, + .rtc_wmac_base_address = 0x00005000, + .soc_core_base_address = 0x00009000, + .wlan_mac_base_address = 0x00020000, + .ce_wrapper_base_address = 0x00057000, + .ce0_base_address = 0x00057400, + .ce1_base_address = 0x00057800, + .ce2_base_address = 0x00057c00, + .ce3_base_address = 0x00058000, + .ce4_base_address = 0x00058400, + .ce5_base_address = 0x00058800, + .ce6_base_address = 0x00058c00, + .ce7_base_address = 0x00059000, + .soc_reset_control_si0_rst_mask = 0x00000001, + .soc_reset_control_ce_rst_mask = 0x00040000, + .soc_chip_id_address = 0x000000ec, + .scratch_3_address = 0x00000030, + .fw_indicator_address = 0x00009030, + .pcie_local_base_address = 0x00080000, + .ce_wrap_intr_sum_host_msi_lsb = 0x00000008, + .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00, + .pcie_intr_fw_mask = 0x00000400, + .pcie_intr_ce_mask_all = 0x0007f800, + .pcie_intr_clr_address = 0x00000014, +}; + +const struct ath10k_hw_regs qca6174_regs = { + .rtc_soc_base_address = 0x00000800, + .rtc_wmac_base_address = 0x00001000, + .soc_core_base_address = 0x0003a000, + .wlan_mac_base_address = 0x00010000, + .ce_wrapper_base_address = 0x00034000, + .ce0_base_address = 0x00034400, + .ce1_base_address = 0x00034800, + .ce2_base_address = 0x00034c00, + .ce3_base_address = 0x00035000, + .ce4_base_address = 0x00035400, + .ce5_base_address = 0x00035800, + .ce6_base_address = 0x00035c00, + .ce7_base_address = 0x00036000, + .soc_reset_control_si0_rst_mask = 0x00000000, + .soc_reset_control_ce_rst_mask = 0x00000001, + .soc_chip_id_address = 0x000000f0, + .scratch_3_address = 0x00000028, + .fw_indicator_address = 0x0003a028, + .pcie_local_base_address = 0x00080000, + .ce_wrap_intr_sum_host_msi_lsb = 0x00000008, + .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00, + .pcie_intr_fw_mask = 0x00000400, + .pcie_intr_ce_mask_all = 0x0007f800, + .pcie_intr_clr_address = 0x00000014, + .cpu_pll_init_address = 0x00404020, + .cpu_speed_address = 0x00404024, + .core_clk_div_address = 0x00404028, +}; + +const struct ath10k_hw_regs qca99x0_regs = { + .rtc_soc_base_address = 0x00080000, + .rtc_wmac_base_address = 0x00000000, + .soc_core_base_address = 0x00082000, + .wlan_mac_base_address = 0x00030000, + .ce_wrapper_base_address = 0x0004d000, + .ce0_base_address = 0x0004a000, + .ce1_base_address = 0x0004a400, + .ce2_base_address = 0x0004a800, + .ce3_base_address = 0x0004ac00, + .ce4_base_address = 
0x0004b000,
+ .ce5_base_address = 0x0004b400,
+ .ce6_base_address = 0x0004b800,
+ .ce7_base_address = 0x0004bc00,
+ /* Note: qca99x0 supports up to 12 Copy Engines. Other than the
+ * addresses of CE0 and CE1, no other copy engine is directly
+ * referred to in the code. It is not really necessary to assign
+ * addresses for the newly supported CEs in this address table.
+ * Copy Engine Address
+ * CE8 0x0004c000
+ * CE9 0x0004c400
+ * CE10 0x0004c800
+ * CE11 0x0004cc00
+ */
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .soc_chip_id_address = 0x000000ec,
+ .scratch_3_address = 0x00040050,
+ .fw_indicator_address = 0x00040050,
+ .pcie_local_base_address = 0x00000000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+ .pcie_intr_ce_mask_all = 0x000fff00,
+ .pcie_intr_clr_address = 0x00000010,
+};
+
+const struct ath10k_hw_regs qca4019_regs = {
+ .rtc_soc_base_address = 0x00080000,
+ .soc_core_base_address = 0x00082000,
+ .wlan_mac_base_address = 0x00030000,
+ .ce_wrapper_base_address = 0x0004d000,
+ .ce0_base_address = 0x0004a000,
+ .ce1_base_address = 0x0004a400,
+ .ce2_base_address = 0x0004a800,
+ .ce3_base_address = 0x0004ac00,
+ .ce4_base_address = 0x0004b000,
+ .ce5_base_address = 0x0004b400,
+ .ce6_base_address = 0x0004b800,
+ .ce7_base_address = 0x0004bc00,
+ /* qca4019 supports up to 12 copy engines. Since the base addresses
+ * of ce8 to ce11 are not directly referred to in the code, there is
+ * no need to have them as separate members in this table.
+ * Copy Engine Address
+ * CE8 0x0004c000
+ * CE9 0x0004c400
+ * CE10 0x0004c800
+ * CE11 0x0004cc00
+ */
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .soc_chip_id_address = 0x000000ec,
+ .fw_indicator_address = 0x0004f00c,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+ .pcie_intr_ce_mask_all = 0x000fff00,
+ .pcie_intr_clr_address = 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 8,
+ .msi_assign_ce_max = 7,
+ .num_target_ce_config_wlan = 7,
+ .ce_desc_meta_data_mask = 0xFFFC,
+ .ce_desc_meta_data_lsb = 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 8,
+ .msi_assign_ce_max = 7,
+ .num_target_ce_config_wlan = 7,
+ .ce_desc_meta_data_mask = 0xFFFC,
+ .ce_desc_meta_data_lsb = 2,
+ .rfkill_pin = 16,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 1,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+ .rtc_state_val_on = 7,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+const struct ath10k_hw_values qca9888_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+const struct ath10k_hw_values qca4019_values = {
+ .ce_count = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+const struct ath10k_hw_regs wcn3990_regs = {
+ .rtc_soc_base_address = 0x00000000,
+ .rtc_wmac_base_address = 0x00000000,
+ .soc_core_base_address = 0x00000000,
+ .ce_wrapper_base_address = 0x0024C000,
+ .ce0_base_address = 0x00240000,
+ .ce1_base_address = 0x00241000,
+ .ce2_base_address = 0x00242000,
+ .ce3_base_address = 0x00243000,
+
.ce4_base_address = 0x00244000, + .ce5_base_address = 0x00245000, + .ce6_base_address = 0x00246000, + .ce7_base_address = 0x00247000, + .ce8_base_address = 0x00248000, + .ce9_base_address = 0x00249000, + .ce10_base_address = 0x0024A000, + .ce11_base_address = 0x0024B000, + .soc_chip_id_address = 0x000000f0, + .soc_reset_control_si0_rst_mask = 0x00000001, + .soc_reset_control_ce_rst_mask = 0x00000100, + .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c, + .ce_wrap_intr_sum_host_msi_mask = 0x00fff000, + .pcie_intr_fw_mask = 0x00100000, +}; + +static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = { + .msb = 0x00000010, + .lsb = 0x00000010, + .mask = GENMASK(17, 17), +}; + +static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = { + .msb = 0x00000012, + .lsb = 0x00000012, + .mask = GENMASK(18, 18), +}; + +static const struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = { + .msb = 0x00000000, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static const struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = { + .addr = 0x00000018, + .src_ring = &wcn3990_src_ring, + .dst_ring = &wcn3990_dst_ring, + .dmax = &wcn3990_dmax, +}; + +static const struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = { + .mask = GENMASK(0, 0), +}; + +static const struct ath10k_hw_ce_host_ie wcn3990_host_ie = { + .copy_complete = &wcn3990_host_ie_cc, +}; + +static const struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = { + .dstr_lmask = 0x00000010, + .dstr_hmask = 0x00000008, + .srcr_lmask = 0x00000004, + .srcr_hmask = 0x00000002, + .cc_mask = 0x00000001, + .wm_mask = 0x0000001E, + .addr = 0x00000030, +}; + +static const struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = { + .axi_err = 0x00000100, + .dstr_add_err = 0x00000200, + .srcr_len_err = 0x00000100, + .dstr_mlen_vio = 0x00000080, + .dstr_overflow = 0x00000040, + .srcr_overflow = 0x00000020, + .err_mask = 0x000003E0, + .addr = 0x00000038, +}; + +static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = { + .msb = 0x00000000, + .lsb = 0x00000010, + .mask = GENMASK(31, 16), +}; + +static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = { + .addr = 0x0000004c, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &wcn3990_src_wm_low, + .wm_high = &wcn3990_src_wm_high, +}; + +static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = { + .lsb = 0x00000010, + .mask = GENMASK(31, 16), +}; + +static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { + .addr = 0x00000050, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &wcn3990_dst_wm_low, + .wm_high = &wcn3990_dst_wm_high, +}; + +static const struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = { + .shift = 19, + .mask = 0x00080000, + .enable = 0x00000000, +}; + +const struct ath10k_hw_ce_regs wcn3990_ce_regs = { + .sr_base_addr_lo = 0x00000000, + .sr_base_addr_hi = 0x00000004, + .sr_size_addr = 0x00000008, + .dr_base_addr_lo = 0x0000000c, + .dr_base_addr_hi = 0x00000010, + .dr_size_addr = 0x00000014, + .misc_ie_addr = 0x00000034, + .sr_wr_index_addr = 0x0000003c, + .dst_wr_index_addr = 0x00000040, + .current_srri_addr = 0x00000044, + .current_drri_addr = 0x00000048, + .ce_rri_low = 0x0024C004, + .ce_rri_high = 0x0024C008, + .host_ie_addr = 0x0000002c, + 
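/* field-level shift/mask maps defined earlier in this file */
+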
.ctrl1_regs = &wcn3990_ctrl1, + .host_ie = &wcn3990_host_ie, + .wm_regs = &wcn3990_wm_reg, + .misc_regs = &wcn3990_misc_reg, + .wm_srcr = &wcn3990_wm_src_ring, + .wm_dstr = &wcn3990_wm_dst_ring, + .upd = &wcn3990_ctrl1_upd, +}; + +const struct ath10k_hw_values wcn3990_values = { + .rtc_state_val_on = 5, + .ce_count = 12, + .msi_assign_ce_max = 12, + .num_target_ce_config_wlan = 12, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, +}; + +static const struct ath10k_hw_ce_regs_addr_map qcax_src_ring = { + .msb = 0x00000010, + .lsb = 0x00000010, + .mask = GENMASK(16, 16), +}; + +static const struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = { + .msb = 0x00000011, + .lsb = 0x00000011, + .mask = GENMASK(17, 17), +}; + +static const struct ath10k_hw_ce_regs_addr_map qcax_dmax = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static const struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = { + .addr = 0x00000010, + .hw_mask = 0x0007ffff, + .sw_mask = 0x0007ffff, + .hw_wr_mask = 0x00000000, + .sw_wr_mask = 0x0007ffff, + .reset_mask = 0xffffffff, + .reset = 0x00000080, + .src_ring = &qcax_src_ring, + .dst_ring = &qcax_dst_ring, + .dmax = &qcax_dmax, +}; + +static const struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = { + .msb = 0x00000003, + .lsb = 0x00000003, + .mask = GENMASK(3, 3), +}; + +static const struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = { + .msb = 0x00000000, + .mask = GENMASK(0, 0), + .status_reset = 0x00000000, + .status = &qcax_cmd_halt_status, +}; + +static const struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = { + .msb = 0x00000000, + .lsb = 0x00000000, + .mask = GENMASK(0, 0), +}; + +static const struct ath10k_hw_ce_host_ie qcax_host_ie = { + .copy_complete_reset = 0x00000000, + .copy_complete = &qcax_host_ie_cc, +}; + +static const struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = { + .dstr_lmask = 0x00000010, + .dstr_hmask = 0x00000008, + .srcr_lmask = 0x00000004, + .srcr_hmask = 0x00000002, + .cc_mask = 0x00000001, + .wm_mask = 0x0000001E, + .addr = 0x00000030, +}; + +static const struct ath10k_hw_ce_misc_regs qcax_misc_reg = { + .axi_err = 0x00000400, + .dstr_add_err = 0x00000200, + .srcr_len_err = 0x00000100, + .dstr_mlen_vio = 0x00000080, + .dstr_overflow = 0x00000040, + .srcr_overflow = 0x00000020, + .err_mask = 0x000007E0, + .addr = 0x00000038, +}; + +static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = { + .msb = 0x0000001f, + .lsb = 0x00000010, + .mask = GENMASK(31, 16), +}; + +static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = { + .addr = 0x0000004c, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &qcax_src_wm_low, + .wm_high = &qcax_src_wm_high, +}; + +static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = { + .lsb = 0x00000010, + .mask = GENMASK(31, 16), +}; + +static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = { + .addr = 0x00000050, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &qcax_dst_wm_low, + .wm_high = &qcax_dst_wm_high, +}; + +const struct ath10k_hw_ce_regs qcax_ce_regs = { + .sr_base_addr_lo = 0x00000000, + .sr_size_addr = 0x00000004, + .dr_base_addr_lo = 0x00000008, + .dr_size_addr = 0x0000000c, + .ce_cmd_addr = 0x00000018, + .misc_ie_addr = 
0x00000034, + .sr_wr_index_addr = 0x0000003c, + .dst_wr_index_addr = 0x00000040, + .current_srri_addr = 0x00000044, + .current_drri_addr = 0x00000048, + .host_ie_addr = 0x0000002c, + .ctrl1_regs = &qcax_ctrl1, + .cmd_halt = &qcax_cmd_halt, + .host_ie = &qcax_host_ie, + .wm_regs = &qcax_wm_reg, + .misc_regs = &qcax_misc_reg, + .wm_srcr = &qcax_wm_src_ring, + .wm_dstr = &qcax_wm_dst_ring, +}; + +const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = { + { + .refclk = 48000000, + .div = 0xe, + .rnfrac = 0x2aaa8, + .settle_time = 2400, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 19200000, + .div = 0x24, + .rnfrac = 0x2aaa8, + .settle_time = 960, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 24000000, + .div = 0x1d, + .rnfrac = 0x15551, + .settle_time = 1200, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 26000000, + .div = 0x1b, + .rnfrac = 0x4ec4, + .settle_time = 1300, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 37400000, + .div = 0x12, + .rnfrac = 0x34b49, + .settle_time = 1870, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 38400000, + .div = 0x12, + .rnfrac = 0x15551, + .settle_time = 1920, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 40000000, + .div = 0x12, + .rnfrac = 0x26665, + .settle_time = 2000, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 52000000, + .div = 0x1b, + .rnfrac = 0x4ec4, + .settle_time = 2600, + .refdiv = 0, + .outdiv = 1, + }, +}; + +void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, + u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev) +{ + u32 cc_fix = 0; + u32 rcc_fix = 0; + enum ath10k_hw_cc_wraparound_type wraparound_type; + + survey->filled |= SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY; + + wraparound_type = ar->hw_params.cc_wraparound_type; + + if (cc < cc_prev || rcc < rcc_prev) { + switch (wraparound_type) { + case ATH10K_HW_CC_WRAP_SHIFTED_ALL: + if (cc < cc_prev) { + cc_fix = 0x7fffffff; + survey->filled &= ~SURVEY_INFO_TIME_BUSY; + } + break; + case ATH10K_HW_CC_WRAP_SHIFTED_EACH: + if (cc < cc_prev) + cc_fix = 0x7fffffff; + + if (rcc < rcc_prev) + rcc_fix = 0x7fffffff; + break; + case ATH10K_HW_CC_WRAP_DISABLED: + break; + } + } + + cc -= cc_prev - cc_fix; + rcc -= rcc_prev - rcc_fix; + + survey->time = CCNT_TO_MSEC(ar, cc); + survey->time_busy = CCNT_TO_MSEC(ar, rcc); +} + +/* The firmware does not support setting the coverage class. Instead this + * function monitors and modifies the corresponding MAC registers. + */ +static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar, + int radio_idx, + s16 value) +{ + u32 slottime_reg; + u32 slottime; + u32 timeout_reg; + u32 ack_timeout; + u32 cts_timeout; + u32 phyclk_reg; + u32 phyclk; + u64 fw_dbglog_mask; + u32 fw_dbglog_level; + + mutex_lock(&ar->conf_mutex); + + /* Only modify registers if the core is started. */ + if ((ar->state != ATH10K_STATE_ON) && + (ar->state != ATH10K_STATE_RESTARTED)) { + spin_lock_bh(&ar->data_lock); + /* Store config value for when radio boots up */ + ar->fw_coverage.coverage_class = value; + spin_unlock_bh(&ar->data_lock); + goto unlock; + } + + /* Retrieve the current values of the two registers that need to be + * adjusted. 
+	 */
+	slottime_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+					 WAVE1_PCU_GBL_IFS_SLOT);
+	timeout_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+					WAVE1_PCU_ACK_CTS_TIMEOUT);
+	phyclk_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+				       WAVE1_PHYCLK);
+	phyclk = MS(phyclk_reg, WAVE1_PHYCLK_USEC) + 1;
+
+	if (value < 0)
+		value = ar->fw_coverage.coverage_class;
+
+	/* Break out if the coverage class and registers have the expected
+	 * value.
+	 */
+	if (value == ar->fw_coverage.coverage_class &&
+	    slottime_reg == ar->fw_coverage.reg_slottime_conf &&
+	    timeout_reg == ar->fw_coverage.reg_ack_cts_timeout_conf &&
+	    phyclk_reg == ar->fw_coverage.reg_phyclk)
+		goto unlock;
+
+	/* Store new initial register values from the firmware. */
+	if (slottime_reg != ar->fw_coverage.reg_slottime_conf)
+		ar->fw_coverage.reg_slottime_orig = slottime_reg;
+	if (timeout_reg != ar->fw_coverage.reg_ack_cts_timeout_conf)
+		ar->fw_coverage.reg_ack_cts_timeout_orig = timeout_reg;
+	ar->fw_coverage.reg_phyclk = phyclk_reg;
+
+	/* Calculate new value based on the (original) firmware calculation. */
+	slottime_reg = ar->fw_coverage.reg_slottime_orig;
+	timeout_reg = ar->fw_coverage.reg_ack_cts_timeout_orig;
+
+	/* Do some sanity checks on the slottime register. */
+	if (slottime_reg % phyclk) {
+		ath10k_warn(ar,
+			    "failed to set coverage class: expected integer microsecond value in register\n");
+
+		goto store_regs;
+	}
+
+	slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
+	slottime = slottime / phyclk;
+	if (slottime != 9 && slottime != 20) {
+		ath10k_warn(ar,
+			    "failed to set coverage class: expected slot time of 9 or 20us in HW register. It is %uus.\n",
+			    slottime);
+
+		goto store_regs;
+	}
+
+	/* Recalculate the register values by adding the additional propagation
+	 * delay (3us per coverage class).
+	 */
+
+	slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
+	slottime += value * 3 * phyclk;
+	slottime = min_t(u32, slottime, WAVE1_PCU_GBL_IFS_SLOT_MAX);
+	slottime = SM(slottime, WAVE1_PCU_GBL_IFS_SLOT);
+	slottime_reg = (slottime_reg & ~WAVE1_PCU_GBL_IFS_SLOT_MASK) | slottime;
+
+	/* Update ack timeout (lower halfword). */
+	ack_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
+	ack_timeout += 3 * value * phyclk;
+	ack_timeout = min_t(u32, ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
+	ack_timeout = SM(ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
+
+	/* Update cts timeout (upper halfword). */
+	cts_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
+	cts_timeout += 3 * value * phyclk;
+	cts_timeout = min_t(u32, cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
+	cts_timeout = SM(cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
+
+	timeout_reg = ack_timeout | cts_timeout;
+
+	ath10k_hif_write32(ar,
+			   WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_GBL_IFS_SLOT,
+			   slottime_reg);
+	ath10k_hif_write32(ar,
+			   WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_ACK_CTS_TIMEOUT,
+			   timeout_reg);
+
+	/* Ensure we have a debug level of WARN set for the case that the
+	 * coverage class is larger than 0. This is important as we need to
+	 * set the registers again if the firmware does an internal reset and
+	 * this way we will be notified of the event.
+	 */
+	fw_dbglog_mask = ath10k_debug_get_fw_dbglog_mask(ar);
+	fw_dbglog_level = ath10k_debug_get_fw_dbglog_level(ar);
+
+	if (value > 0) {
+		if (fw_dbglog_level > ATH10K_DBGLOG_LEVEL_WARN)
+			fw_dbglog_level = ATH10K_DBGLOG_LEVEL_WARN;
+		fw_dbglog_mask = ~0;
+	}
+
+	ath10k_wmi_dbglog_cfg(ar, fw_dbglog_mask, fw_dbglog_level);
+
+store_regs:
+	/* After an error we will not retry setting the coverage class. */
+	spin_lock_bh(&ar->data_lock);
+	ar->fw_coverage.coverage_class = value;
+	spin_unlock_bh(&ar->data_lock);
+
+	ar->fw_coverage.reg_slottime_conf = slottime_reg;
+	ar->fw_coverage.reg_ack_cts_timeout_conf = timeout_reg;
+
+unlock:
+	mutex_unlock(&ar->conf_mutex);
+}
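+
+/* Editor's illustration (not part of the patch): the slot time arithmetic
+ * above, with hypothetical numbers. Assuming a PHY clock of 40 cycles per
+ * microsecond and the standard 9 us slot, coverage class 2 adds
+ * 2 * 3 us = 6 us of propagation delay:
+ *
+ *	slottime  = 9 * 40;		// 360 cycles
+ *	slottime += 2 * 3 * 40;		// +240 -> 600 cycles = 15 us
+ *	slottime  = min_t(u32, slottime, WAVE1_PCU_GBL_IFS_SLOT_MAX);
+ *
+ * The same 3 us per class delta is applied to the ack and cts timeouts.
+ */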
+
+/**
+ * ath10k_hw_qca6174_enable_pll_clock() - enable the qca6174 hw pll clock
+ * @ar: the ath10k instance
+ *
+ * This function is very hardware specific; the clock initialization steps
+ * are order sensitive and getting them wrong can leave the chip in an
+ * undefined state, so they must be performed in sequence.
+ *
+ * *** Be careful if you plan to refactor this sequence. ***
+ *
+ * Return: 0 if the PLL was enabled successfully, otherwise -EINVAL
+ */
+static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
+{
+	int ret, wait_limit;
+	u32 clk_div_addr, pll_init_addr, speed_addr;
+	u32 addr, reg_val, mem_val;
+	struct ath10k_hw_params *hw;
+	const struct ath10k_hw_clk_params *hw_clk;
+
+	hw = &ar->hw_params;
+
+	if (ar->regs->core_clk_div_address == 0 ||
+	    ar->regs->cpu_pll_init_address == 0 ||
+	    ar->regs->cpu_speed_address == 0)
+		return -EINVAL;
+
+	clk_div_addr = ar->regs->core_clk_div_address;
+	pll_init_addr = ar->regs->cpu_pll_init_address;
+	speed_addr = ar->regs->cpu_speed_address;
+
+	/* Read the efuse register to find out the right hw clock configuration */
+	addr = (RTC_SOC_BASE_ADDRESS | EFUSE_OFFSET);
+	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+	if (ret)
+		return -EINVAL;
+
+	/* Sanity check: bail out if the hw refclk index is out of bounds */
+	if (MS(reg_val, EFUSE_XTAL_SEL) >= ATH10K_HW_REFCLK_COUNT)
+		return -EINVAL;
+
+	hw_clk = &hw->hw_clk[MS(reg_val, EFUSE_XTAL_SEL)];
+
+	/* Set the rnfrac and outdiv params to bb_pll register */
+	addr = (RTC_SOC_BASE_ADDRESS | BB_PLL_CONFIG_OFFSET);
+	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+	if (ret)
+		return -EINVAL;
+
+	reg_val &= ~(BB_PLL_CONFIG_FRAC_MASK | BB_PLL_CONFIG_OUTDIV_MASK);
+	reg_val |= (SM(hw_clk->rnfrac, BB_PLL_CONFIG_FRAC) |
+		    SM(hw_clk->outdiv, BB_PLL_CONFIG_OUTDIV));
+	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+	if (ret)
+		return -EINVAL;
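+
+	/* Editor's note: every step below repeats the read-modify-write
+	 * pattern used above, built on the driver's MS()/SM() field macros,
+	 * roughly:
+	 *
+	 *	field = MS(reg, NAME);			// extract field
+	 *	reg = (reg & ~NAME_MASK) | SM(val, NAME); // install field
+	 *
+	 * Getting any single write out of order can leave the PLL
+	 * half-programmed, hence the warning in the kerneldoc above.
+	 */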
+
+	/* Set the correct settle time value to pll_settle register */
+	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_SETTLE_OFFSET);
+	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+	if (ret)
+		return -EINVAL;
+
+	reg_val &= ~WLAN_PLL_SETTLE_TIME_MASK;
+	reg_val |= SM(hw_clk->settle_time, WLAN_PLL_SETTLE_TIME);
+	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+	if (ret)
+		return -EINVAL;
+
+	/* Set the clock_ctrl div to core_clk_ctrl register */
+	addr = (RTC_SOC_BASE_ADDRESS | SOC_CORE_CLK_CTRL_OFFSET);
+	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+	if (ret)
+		return -EINVAL;
+
+	reg_val &= ~SOC_CORE_CLK_CTRL_DIV_MASK;
+	reg_val |= SM(1, SOC_CORE_CLK_CTRL_DIV);
+	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+	if (ret)
+		return -EINVAL;
+
+	/* Set the clock_div register */
+	mem_val = 1;
+	ret = ath10k_bmi_write_memory(ar, clk_div_addr, &mem_val,
+				      sizeof(mem_val));
+	if (ret)
+		return -EINVAL;
+
+	/* Configure the pll_control register */
+	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+	if (ret)
+		return -EINVAL;
+
+	reg_val |= (SM(hw_clk->refdiv, WLAN_PLL_CONTROL_REFDIV) |
+		    SM(hw_clk->div, WLAN_PLL_CONTROL_DIV) |
+		    SM(1, WLAN_PLL_CONTROL_NOPWD));
+	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+	if (ret)
+		return -EINVAL;
+
+	/* Busy wait (max 1s) until the rtc_sync status register indicates ready */
+	wait_limit = 100000;
+	addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
+	do {
+		ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+		if (ret)
+			return -EINVAL;
+
+		if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+			break;
+
+		wait_limit--;
+		udelay(10);
+
+	} while (wait_limit > 0);
+
+	if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+		return -EINVAL;
+
+	/* Unset the pll_bypass in pll_control register */
+	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+	if (ret)
+		return -EINVAL;
+
+	reg_val &= ~WLAN_PLL_CONTROL_BYPASS_MASK;
+	reg_val |= SM(0, WLAN_PLL_CONTROL_BYPASS);
+	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+	if (ret)
+		return -EINVAL;
+
+	/* Busy wait (max 1s) until the rtc_sync status register indicates ready */
+	wait_limit = 100000;
+	addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
+	do {
+		ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+		if (ret)
+			return -EINVAL;
+
+		if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+			break;
+
+		wait_limit--;
+		udelay(10);
+
+	} while (wait_limit > 0);
+
+	if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+		return -EINVAL;
+
+	/* Enable the hardware cpu clock register */
+	addr = (RTC_SOC_BASE_ADDRESS | SOC_CPU_CLOCK_OFFSET);
+	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+	if (ret)
+		return -EINVAL;
+
+	reg_val &= ~SOC_CPU_CLOCK_STANDARD_MASK;
+	reg_val |= SM(1, SOC_CPU_CLOCK_STANDARD);
+	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+	if (ret)
+		return -EINVAL;
+
+	/* Unset the nopwd bit in the pll_control register */
+	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+	if (ret)
+		return -EINVAL;
+
+	reg_val &= ~WLAN_PLL_CONTROL_NOPWD_MASK;
+	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+	if (ret)
+		return -EINVAL;
+
+	/* Enable the pll_init register */
+	mem_val = 1;
+	ret = ath10k_bmi_write_memory(ar, pll_init_addr, &mem_val,
+				      sizeof(mem_val));
+	if (ret)
+		return -EINVAL;
+
+	/* Set the target clock frequency in the speed register */
+	ret = ath10k_bmi_write_memory(ar, speed_addr, &hw->target_cpu_freq,
+				      sizeof(hw->target_cpu_freq));
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Program CPU_ADDR_MSB to allow different memory
+ * region access.
+ */
+static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
+{
+	u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS;
+
+	ath10k_hif_write32(ar, address, msb);
+}
+
+/* 1. Write to a memory region of the target, such as IRAM and DRAM.
+ * 2. Target addresses (0x0 ~ 0x00100000 and 0x00400000 ~ 0x00500000)
+ *    can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
+ * 3. In order to access regions other than the above,
+ *    we need to set the value of the CPU_ADDR_MSB register.
+ * 4. Target memory access space is limited to 1M size. If the size is
+ *    larger than 1M, it needs to be split and CPU_ADDR_MSB programmed
+ *    accordingly.
+ */
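+
+/* Editor's illustration (hypothetical addresses; the macros are defined
+ * near the end of hw.h): a 0x20000 byte write to target address
+ * 0x005f0000 starts at offset 0xf0000 within region 0x5, so only
+ * 0x10000 bytes fit below the 1M boundary and the write is split:
+ *
+ *	offset     = 0x005f0000 & REGION_ACCESS_SIZE_MASK;	// 0xf0000
+ *	first_len  = REGION_ACCESS_SIZE_LIMIT - offset;		// 0x10000, region 0x5
+ *	second_len = 0x20000 - first_len;			// 0x10000, region 0x6,
+ *								// at addr & ~mask
+ */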
+static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar,
+					       const void *buffer,
+					       u32 address,
+					       u32 length)
+{
+	u32 addr = address & REGION_ACCESS_SIZE_MASK;
+	int ret, remain_size, size;
+	const u8 *buf;
+
+	ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address));
+
+	if (addr + length > REGION_ACCESS_SIZE_LIMIT) {
+		size = REGION_ACCESS_SIZE_LIMIT - addr;
+		remain_size = length - size;
+
+		ret = ath10k_hif_diag_write(ar, address, buffer, size);
+		if (ret) {
+			ath10k_warn(ar,
+				    "failed to download the first %d bytes segment to address:0x%x: %d\n",
+				    size, address, ret);
+			goto done;
+		}
+
+		/* Change msb to the next memory region */
+		ath10k_hw_map_target_mem(ar,
+					 CPU_ADDR_MSB_REGION_VAL(address) + 1);
+		buf = buffer + size;
+		ret = ath10k_hif_diag_write(ar,
+					    address & ~REGION_ACCESS_SIZE_MASK,
+					    buf, remain_size);
+		if (ret) {
+			ath10k_warn(ar,
+				    "failed to download the second %d bytes segment to address:0x%x: %d\n",
+				    remain_size,
+				    address & ~REGION_ACCESS_SIZE_MASK,
+				    ret);
+			goto done;
+		}
+	} else {
+		ret = ath10k_hif_diag_write(ar, address, buffer, length);
+		if (ret) {
+			ath10k_warn(ar,
+				    "failed to download the only %d bytes segment to address:0x%x: %d\n",
+				    length, address, ret);
+			goto done;
+		}
+	}
+
+done:
+	/* Change msb back to DRAM */
+	ath10k_hw_map_target_mem(ar,
+				 CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS));
+	return ret;
+}
+
+static int ath10k_hw_diag_segment_download(struct ath10k *ar,
+					   const void *buffer,
+					   u32 address,
+					   u32 length)
+{
+	if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT)
+		/* Needs to change MSB for memory write */
+		return ath10k_hw_diag_segment_msb_download(ar, buffer,
+							   address, length);
+	else
+		return ath10k_hif_diag_write(ar, address, buffer, length);
+}
+
+int ath10k_hw_diag_fast_download(struct ath10k *ar,
+				 u32 address,
+				 const void *buffer,
+				 u32 length)
+{
+	const u8 *buf = buffer;
+	bool sgmt_end = false;
+	u32 base_addr = 0;
+	u32 base_len = 0;
+	u32 left = 0;
+	struct bmi_segmented_file_header *hdr;
+	struct bmi_segmented_metadata *metadata;
+	int ret = 0;
+
+	if (length < sizeof(*hdr))
+		return -EINVAL;
+
+	/* Check the firmware header. If the magic number is wrong or the
+	 * image is compressed, return an error.
+ */ + hdr = (struct bmi_segmented_file_header *)buf; + if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "Not a supported firmware, magic_num:0x%x\n", + hdr->magic_num); + return -EINVAL; + } + + if (hdr->file_flags != 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "Not a supported firmware, file_flags:0x%x\n", + hdr->file_flags); + return -EINVAL; + } + + metadata = (struct bmi_segmented_metadata *)hdr->data; + left = length - sizeof(*hdr); + + while (left > 0) { + if (left < sizeof(*metadata)) { + ath10k_warn(ar, "firmware segment is truncated: %d\n", + left); + ret = -EINVAL; + break; + } + base_addr = __le32_to_cpu(metadata->addr); + base_len = __le32_to_cpu(metadata->length); + buf = metadata->data; + left -= sizeof(*metadata); + + switch (base_len) { + case BMI_SGMTFILE_BEGINADDR: + /* base_addr is the start address to run */ + ret = ath10k_bmi_set_start(ar, base_addr); + base_len = 0; + break; + case BMI_SGMTFILE_DONE: + /* no more segment */ + base_len = 0; + sgmt_end = true; + ret = 0; + break; + case BMI_SGMTFILE_BDDATA: + case BMI_SGMTFILE_EXEC: + ath10k_warn(ar, + "firmware has unsupported segment:%d\n", + base_len); + ret = -EINVAL; + break; + default: + if (base_len > left) { + /* sanity check */ + ath10k_warn(ar, + "firmware has invalid segment length, %d > %d\n", + base_len, left); + ret = -EINVAL; + break; + } + + ret = ath10k_hw_diag_segment_download(ar, + buf, + base_addr, + base_len); + + if (ret) + ath10k_warn(ar, + "failed to download firmware via diag interface:%d\n", + ret); + break; + } + + if (ret || sgmt_end) + break; + + metadata = (struct bmi_segmented_metadata *)(buf + base_len); + left -= base_len; + } + + if (ret == 0) + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot firmware fast diag download successfully.\n"); + return ret; +} + +static int ath10k_htt_tx_rssi_enable(struct htt_resp *resp) +{ + return (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI); +} + +static int ath10k_htt_tx_rssi_enable_wcn3990(struct htt_resp *resp) +{ + return (resp->data_tx_completion.flags2 & + HTT_TX_DATA_RSSI_ENABLE_WCN3990); +} + +static int ath10k_get_htt_tx_data_rssi_pad(struct htt_resp *resp) +{ + struct htt_data_tx_completion_ext extd; + int pad_bytes = 0; + + if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_RETRIES) + pad_bytes += sizeof(extd.a_retries) / + sizeof(extd.msdus_rssi[0]); + + if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_TIMESTAMP) + pad_bytes += sizeof(extd.t_stamp) / sizeof(extd.msdus_rssi[0]); + + return pad_bytes; +} + +const struct ath10k_hw_ops qca988x_ops = { + .set_coverage_class = ath10k_hw_qca988x_set_coverage_class, + .is_rssi_enable = ath10k_htt_tx_rssi_enable, +}; + +const struct ath10k_hw_ops qca99x0_ops = { + .is_rssi_enable = ath10k_htt_tx_rssi_enable, +}; + +const struct ath10k_hw_ops qca6174_ops = { + .set_coverage_class = ath10k_hw_qca988x_set_coverage_class, + .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock, + .is_rssi_enable = ath10k_htt_tx_rssi_enable, +}; + +const struct ath10k_hw_ops qca6174_sdio_ops = { + .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock, +}; + +const struct ath10k_hw_ops wcn3990_ops = { + .tx_data_rssi_pad_bytes = ath10k_get_htt_tx_data_rssi_pad, + .is_rssi_enable = ath10k_htt_tx_rssi_enable_wcn3990, +}; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 44ed5af0a204..da71dce9babf 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -1,18 +1,9 @@ +/* 
SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _HW_H_ @@ -20,39 +11,420 @@ #include "targaddrs.h" -/* Supported FW version */ -#define SUPPORTED_FW_MAJOR 1 -#define SUPPORTED_FW_MINOR 0 -#define SUPPORTED_FW_RELEASE 0 -#define SUPPORTED_FW_BUILD 629 +enum ath10k_bus { + ATH10K_BUS_PCI, + ATH10K_BUS_AHB, + ATH10K_BUS_SDIO, + ATH10K_BUS_USB, + ATH10K_BUS_SNOC, +}; + +#define ATH10K_FW_DIR "ath10k" + +#define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac) +#define QCA988X_2_0_DEVICE_ID (0x003c) +#define QCA6164_2_1_DEVICE_ID (0x0041) +#define QCA6174_2_1_DEVICE_ID (0x003e) +#define QCA6174_3_2_DEVICE_ID (0x0042) +#define QCA99X0_2_0_DEVICE_ID (0x0040) +#define QCA9888_2_0_DEVICE_ID (0x0056) +#define QCA9984_1_0_DEVICE_ID (0x0046) +#define QCA9377_1_0_DEVICE_ID (0x0042) +#define QCA9887_1_0_DEVICE_ID (0x0050) -/* QCA988X 1.0 definitions */ -#define QCA988X_HW_1_0_VERSION 0x4000002c -#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0" -#define QCA988X_HW_1_0_FW_FILE "firmware.bin" -#define QCA988X_HW_1_0_OTP_FILE "otp.bin" -#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin" -#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234 +/* QCA988X 1.0 definitions (unsupported) */ +#define QCA988X_HW_1_0_CHIP_ID_REV 0x0 /* QCA988X 2.0 definitions */ #define QCA988X_HW_2_0_VERSION 0x4100016c -#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" -#define QCA988X_HW_2_0_FW_FILE "firmware.bin" -#define QCA988X_HW_2_0_OTP_FILE "otp.bin" -#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" +#define QCA988X_HW_2_0_CHIP_ID_REV 0x2 +#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 -/* Known pecularities: - * - current FW doesn't support raw rx mode (last tested v599) - * - current FW dumps upon raw tx mode (last tested v599) +/* QCA9887 1.0 definitions */ +#define QCA9887_HW_1_0_VERSION 0x4100016d +#define QCA9887_HW_1_0_CHIP_ID_REV 0 +#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0" +#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234 + +/* QCA6174 target BMI version signatures */ +#define QCA6174_HW_1_0_VERSION 0x05000000 +#define QCA6174_HW_1_1_VERSION 0x05000001 +#define QCA6174_HW_1_3_VERSION 0x05000003 +#define QCA6174_HW_2_1_VERSION 0x05010000 +#define QCA6174_HW_3_0_VERSION 0x05020000 +#define QCA6174_HW_3_2_VERSION 0x05030000 + +/* QCA9377 target BMI version signatures */ +#define QCA9377_HW_1_0_DEV_VERSION 0x05020000 +#define QCA9377_HW_1_1_DEV_VERSION 0x05020001 + +enum qca6174_pci_rev { + QCA6174_PCI_REV_1_1 = 0x11, + 
QCA6174_PCI_REV_1_3 = 0x13, + QCA6174_PCI_REV_2_0 = 0x20, + QCA6174_PCI_REV_3_0 = 0x30, +}; + +enum qca6174_chip_id_rev { + QCA6174_HW_1_0_CHIP_ID_REV = 0, + QCA6174_HW_1_1_CHIP_ID_REV = 1, + QCA6174_HW_1_3_CHIP_ID_REV = 2, + QCA6174_HW_2_1_CHIP_ID_REV = 4, + QCA6174_HW_2_2_CHIP_ID_REV = 5, + QCA6174_HW_3_0_CHIP_ID_REV = 8, + QCA6174_HW_3_1_CHIP_ID_REV = 9, + QCA6174_HW_3_2_CHIP_ID_REV = 10, +}; + +enum qca9377_chip_id_rev { + QCA9377_HW_1_0_CHIP_ID_REV = 0x0, + QCA9377_HW_1_1_CHIP_ID_REV = 0x1, +}; + +#define QCA6174_HW_2_1_FW_DIR ATH10K_FW_DIR "/QCA6174/hw2.1" +#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234 + +#define QCA6174_HW_3_0_FW_DIR ATH10K_FW_DIR "/QCA6174/hw3.0" +#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234 + +/* QCA99X0 1.0 definitions (unsupported) */ +#define QCA99X0_HW_1_0_CHIP_ID_REV 0x0 + +/* QCA99X0 2.0 definitions */ +#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000 +#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1 +#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0" +#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 + +/* QCA9984 1.0 defines */ +#define QCA9984_HW_1_0_DEV_VERSION 0x1000000 +#define QCA9984_HW_DEV_TYPE 0xa +#define QCA9984_HW_1_0_CHIP_ID_REV 0x0 +#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0" +#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234 + +/* QCA9888 2.0 defines */ +#define QCA9888_HW_2_0_DEV_VERSION 0x1000000 +#define QCA9888_HW_DEV_TYPE 0xc +#define QCA9888_HW_2_0_CHIP_ID_REV 0x0 +#define QCA9888_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA9888/hw2.0" +#define QCA9888_HW_2_0_PATCH_LOAD_ADDR 0x1234 + +/* QCA9377 1.0 definitions */ +#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" +#define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234 + +/* QCA4019 1.0 definitions */ +#define QCA4019_HW_1_0_DEV_VERSION 0x01000000 +#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0" +#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234 + +/* WCN3990 1.0 definitions */ +#define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990 +#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw1.0" + +#define ATH10K_FW_FILE_BASE "firmware" +#define ATH10K_FW_API_MAX 6 +#define ATH10K_FW_API_MIN 2 + +#define ATH10K_FW_API2_FILE "firmware-2.bin" +#define ATH10K_FW_API3_FILE "firmware-3.bin" + +/* added support for ATH10K_FW_IE_WMI_OP_VERSION */ +#define ATH10K_FW_API4_FILE "firmware-4.bin" + +/* HTT id conflict fix for management frames over HTT */ +#define ATH10K_FW_API5_FILE "firmware-5.bin" + +/* the firmware-6.bin blob */ +#define ATH10K_FW_API6_FILE "firmware-6.bin" + +#define ATH10K_FW_UTF_FILE "utf.bin" +#define ATH10K_FW_UTF_API2_FILE "utf-2.bin" + +#define ATH10K_FW_UTF_FILE_BASE "utf" + +/* includes also the null byte */ +#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K" +#define ATH10K_BOARD_MAGIC "QCA-ATH10K-BOARD" + +#define ATH10K_BOARD_DATA_FILE "board.bin" +#define ATH10K_BOARD_API2_FILE "board-2.bin" +#define ATH10K_EBOARD_DATA_FILE "eboard.bin" + +#define REG_DUMP_COUNT_QCA988X 60 + +struct ath10k_fw_ie { + __le32 id; + __le32 len; + u8 data[]; +}; + +enum ath10k_fw_ie_type { + ATH10K_FW_IE_FW_VERSION = 0, + ATH10K_FW_IE_TIMESTAMP = 1, + ATH10K_FW_IE_FEATURES = 2, + ATH10K_FW_IE_FW_IMAGE = 3, + ATH10K_FW_IE_OTP_IMAGE = 4, + + /* WMI "operations" interface version, 32 bit value. Supported from + * FW API 4 and above. + */ + ATH10K_FW_IE_WMI_OP_VERSION = 5, + + /* HTT "operations" interface version, 32 bit value. Supported from + * FW API 5 and above. 
+ */ + ATH10K_FW_IE_HTT_OP_VERSION = 6, + + /* Code swap image for firmware binary */ + ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7, +}; + +enum ath10k_fw_wmi_op_version { + ATH10K_FW_WMI_OP_VERSION_UNSET = 0, + + ATH10K_FW_WMI_OP_VERSION_MAIN = 1, + ATH10K_FW_WMI_OP_VERSION_10_1 = 2, + ATH10K_FW_WMI_OP_VERSION_10_2 = 3, + ATH10K_FW_WMI_OP_VERSION_TLV = 4, + ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5, + ATH10K_FW_WMI_OP_VERSION_10_4 = 6, + + /* keep last */ + ATH10K_FW_WMI_OP_VERSION_MAX, +}; + +enum ath10k_fw_htt_op_version { + ATH10K_FW_HTT_OP_VERSION_UNSET = 0, + + ATH10K_FW_HTT_OP_VERSION_MAIN = 1, + + /* also used in 10.2 and 10.2.4 branches */ + ATH10K_FW_HTT_OP_VERSION_10_1 = 2, + + ATH10K_FW_HTT_OP_VERSION_TLV = 3, + + ATH10K_FW_HTT_OP_VERSION_10_4 = 4, + + /* keep last */ + ATH10K_FW_HTT_OP_VERSION_MAX, +}; + +enum ath10k_bd_ie_type { + /* contains sub IEs of enum ath10k_bd_ie_board_type */ + ATH10K_BD_IE_BOARD = 0, + ATH10K_BD_IE_BOARD_EXT = 1, +}; + +enum ath10k_bd_ie_board_type { + ATH10K_BD_IE_BOARD_NAME = 0, + ATH10K_BD_IE_BOARD_DATA = 1, +}; + +enum ath10k_hw_rev { + ATH10K_HW_QCA988X, + ATH10K_HW_QCA6174, + ATH10K_HW_QCA99X0, + ATH10K_HW_QCA9888, + ATH10K_HW_QCA9984, + ATH10K_HW_QCA9377, + ATH10K_HW_QCA4019, + ATH10K_HW_QCA9887, + ATH10K_HW_WCN3990, +}; + +struct ath10k_hw_regs { + u32 rtc_soc_base_address; + u32 rtc_wmac_base_address; + u32 soc_core_base_address; + u32 wlan_mac_base_address; + u32 ce_wrapper_base_address; + u32 ce0_base_address; + u32 ce1_base_address; + u32 ce2_base_address; + u32 ce3_base_address; + u32 ce4_base_address; + u32 ce5_base_address; + u32 ce6_base_address; + u32 ce7_base_address; + u32 ce8_base_address; + u32 ce9_base_address; + u32 ce10_base_address; + u32 ce11_base_address; + u32 soc_reset_control_si0_rst_mask; + u32 soc_reset_control_ce_rst_mask; + u32 soc_chip_id_address; + u32 scratch_3_address; + u32 fw_indicator_address; + u32 pcie_local_base_address; + u32 ce_wrap_intr_sum_host_msi_lsb; + u32 ce_wrap_intr_sum_host_msi_mask; + u32 pcie_intr_fw_mask; + u32 pcie_intr_ce_mask_all; + u32 pcie_intr_clr_address; + u32 cpu_pll_init_address; + u32 cpu_speed_address; + u32 core_clk_div_address; +}; + +extern const struct ath10k_hw_regs qca988x_regs; +extern const struct ath10k_hw_regs qca6174_regs; +extern const struct ath10k_hw_regs qca99x0_regs; +extern const struct ath10k_hw_regs qca4019_regs; +extern const struct ath10k_hw_regs wcn3990_regs; + +struct ath10k_hw_ce_regs_addr_map { + u32 msb; + u32 lsb; + u32 mask; +}; + +struct ath10k_hw_ce_ctrl1 { + u32 addr; + u32 hw_mask; + u32 sw_mask; + u32 hw_wr_mask; + u32 sw_wr_mask; + u32 reset_mask; + u32 reset; + const struct ath10k_hw_ce_regs_addr_map *src_ring; + const struct ath10k_hw_ce_regs_addr_map *dst_ring; + const struct ath10k_hw_ce_regs_addr_map *dmax; +}; + +struct ath10k_hw_ce_cmd_halt { + u32 status_reset; + u32 msb; + u32 mask; + const struct ath10k_hw_ce_regs_addr_map *status; +}; + +struct ath10k_hw_ce_host_ie { + u32 copy_complete_reset; + const struct ath10k_hw_ce_regs_addr_map *copy_complete; +}; + +struct ath10k_hw_ce_host_wm_regs { + u32 dstr_lmask; + u32 dstr_hmask; + u32 srcr_lmask; + u32 srcr_hmask; + u32 cc_mask; + u32 wm_mask; + u32 addr; +}; + +struct ath10k_hw_ce_misc_regs { + u32 axi_err; + u32 dstr_add_err; + u32 srcr_len_err; + u32 dstr_mlen_vio; + u32 dstr_overflow; + u32 srcr_overflow; + u32 err_mask; + u32 addr; +}; + +struct ath10k_hw_ce_dst_src_wm_regs { + u32 addr; + u32 low_rst; + u32 high_rst; + const struct ath10k_hw_ce_regs_addr_map *wm_low; + const struct 
ath10k_hw_ce_regs_addr_map *wm_high; +}; + +struct ath10k_hw_ce_ctrl1_upd { + u32 shift; + u32 mask; + u32 enable; +}; + +struct ath10k_hw_ce_regs { + u32 sr_base_addr_lo; + u32 sr_base_addr_hi; + u32 sr_size_addr; + u32 dr_base_addr_lo; + u32 dr_base_addr_hi; + u32 dr_size_addr; + u32 ce_cmd_addr; + u32 misc_ie_addr; + u32 sr_wr_index_addr; + u32 dst_wr_index_addr; + u32 current_srri_addr; + u32 current_drri_addr; + u32 ddr_addr_for_rri_low; + u32 ddr_addr_for_rri_high; + u32 ce_rri_low; + u32 ce_rri_high; + u32 host_ie_addr; + const struct ath10k_hw_ce_host_wm_regs *wm_regs; + const struct ath10k_hw_ce_misc_regs *misc_regs; + const struct ath10k_hw_ce_ctrl1 *ctrl1_regs; + const struct ath10k_hw_ce_cmd_halt *cmd_halt; + const struct ath10k_hw_ce_host_ie *host_ie; + const struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr; + const struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; + const struct ath10k_hw_ce_ctrl1_upd *upd; +}; + +struct ath10k_hw_values { + u32 rtc_state_val_on; + u8 ce_count; + u8 msi_assign_ce_max; + u8 num_target_ce_config_wlan; + u16 ce_desc_meta_data_mask; + u8 ce_desc_meta_data_lsb; + u32 rfkill_pin; + u32 rfkill_cfg; + bool rfkill_on_level; +}; + +extern const struct ath10k_hw_values qca988x_values; +extern const struct ath10k_hw_values qca6174_values; +extern const struct ath10k_hw_values qca99x0_values; +extern const struct ath10k_hw_values qca9888_values; +extern const struct ath10k_hw_values qca4019_values; +extern const struct ath10k_hw_values wcn3990_values; +extern const struct ath10k_hw_ce_regs wcn3990_ce_regs; +extern const struct ath10k_hw_ce_regs qcax_ce_regs; + +void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, + u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev); + +int ath10k_hw_diag_fast_download(struct ath10k *ar, + u32 address, + const void *buffer, + u32 length); + +#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X) +#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887) +#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174) +#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0) +#define QCA_REV_9888(ar) ((ar)->hw_rev == ATH10K_HW_QCA9888) +#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984) +#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377) +#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019) +#define QCA_REV_WCN3990(ar) ((ar)->hw_rev == ATH10K_HW_WCN3990) + +/* Known peculiarities: * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap * - raw have FCS, nwifi doesn't * - ethernet frames have 802.11 header decapped and parts (base hdr, cipher - * param, llc/snap) are aligned to 4byte boundaries each */ + * param, llc/snap) are aligned to 4byte boundaries each + */ enum ath10k_hw_txrx_mode { ATH10K_HW_TXRX_RAW = 0, + + /* Native Wifi decap mode is used to align IP frames to 4-byte + * boundaries and avoid a very expensive re-alignment in mac80211. + */ ATH10K_HW_TXRX_NATIVE_WIFI = 1, ATH10K_HW_TXRX_ETHERNET = 2, + + /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. 
	 */
+	ATH10K_HW_TXRX_MGMT = 3,
 };
 
 enum ath10k_mcast2ucast_mode {
@@ -60,22 +432,271 @@ enum ath10k_mcast2ucast_mode {
 	ATH10K_MCAST2UCAST_ENABLED = 1,
 };
 
+enum ath10k_hw_rate_ofdm {
+	ATH10K_HW_RATE_OFDM_48M = 0,
+	ATH10K_HW_RATE_OFDM_24M,
+	ATH10K_HW_RATE_OFDM_12M,
+	ATH10K_HW_RATE_OFDM_6M,
+	ATH10K_HW_RATE_OFDM_54M,
+	ATH10K_HW_RATE_OFDM_36M,
+	ATH10K_HW_RATE_OFDM_18M,
+	ATH10K_HW_RATE_OFDM_9M,
+};
+
+enum ath10k_hw_rate_cck {
+	ATH10K_HW_RATE_CCK_LP_11M = 0,
+	ATH10K_HW_RATE_CCK_LP_5_5M,
+	ATH10K_HW_RATE_CCK_LP_2M,
+	ATH10K_HW_RATE_CCK_LP_1M,
+	ATH10K_HW_RATE_CCK_SP_11M,
+	ATH10K_HW_RATE_CCK_SP_5_5M,
+	ATH10K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath10k_hw_rate_rev2_cck {
+	ATH10K_HW_RATE_REV2_CCK_LP_1M = 1,
+	ATH10K_HW_RATE_REV2_CCK_LP_2M,
+	ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+	ATH10K_HW_RATE_REV2_CCK_LP_11M,
+	ATH10K_HW_RATE_REV2_CCK_SP_2M,
+	ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+	ATH10K_HW_RATE_REV2_CCK_SP_11M,
+};
+
+enum ath10k_hw_cc_wraparound_type {
+	ATH10K_HW_CC_WRAP_DISABLED = 0,
+
+	/* This type is when the HW chip has a quirky Cycle Counter
+	 * wraparound which resets to 0x7fffffff instead of 0. All
+	 * other CC related counters (e.g. Rx Clear Count) are divided
+	 * by 2 so they never wrap around themselves.
+	 */
+	ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1,
+
+	/* Each hw counter wraps around independently. When the
+	 * counter overflows the respective counter is right shifted
+	 * by 1, i.e. reset to 0x7fffffff, and the other counters will
+	 * keep running unaffected. In this type of wraparound, it
+	 * should be possible to report accurate Rx busy time, unlike
+	 * the first type.
+	 */
+	ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
+};
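+
+/* Editor's illustration of the shifted wraparound fixup used by
+ * ath10k_hw_fill_survey_time() earlier in this diff: a counter that
+ * wrapped from cc_prev = 0x7ffffff0 to cc = 0x00000010 is corrected
+ * with cc_fix = 0x7fffffff, so the consumed delta is
+ *
+ *	cc - (cc_prev - cc_fix) = 0x10 + 0xf = 0x1f cycles
+ *
+ * which CCNT_TO_MSEC() then divides by channel_counters_freq_hz.
+ */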
+
+enum ath10k_hw_refclk_speed {
+	ATH10K_HW_REFCLK_UNKNOWN = -1,
+	ATH10K_HW_REFCLK_48_MHZ = 0,
+	ATH10K_HW_REFCLK_19_2_MHZ = 1,
+	ATH10K_HW_REFCLK_24_MHZ = 2,
+	ATH10K_HW_REFCLK_26_MHZ = 3,
+	ATH10K_HW_REFCLK_37_4_MHZ = 4,
+	ATH10K_HW_REFCLK_38_4_MHZ = 5,
+	ATH10K_HW_REFCLK_40_MHZ = 6,
+	ATH10K_HW_REFCLK_52_MHZ = 7,
+
+	/* must be the last one */
+	ATH10K_HW_REFCLK_COUNT,
+};
+
+struct ath10k_hw_clk_params {
+	u32 refclk;
+	u32 div;
+	u32 rnfrac;
+	u32 settle_time;
+	u32 refdiv;
+	u32 outdiv;
+};
+
+struct ath10k_htt_rx_desc_ops;
+
+struct ath10k_hw_params {
+	u32 id;
+	u16 dev_id;
+	enum ath10k_bus bus;
+	const char *name;
+	u32 patch_load_addr;
+	int uart_pin;
+	int led_pin;
+	u32 otp_exe_param;
+
+	/* Type of hw cycle counter wraparound logic; for more info
+	 * refer to enum ath10k_hw_cc_wraparound_type.
+	 */
+	enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
+
+	/* Some chips expect the fragment descriptor to be contiguous
+	 * memory for any TX operation. Set the continuous_frag_desc flag
+	 * for hardware with this requirement.
+	 */
+	bool continuous_frag_desc;
+
+	/* The CCK hardware rate table mapping was revised for the newer
+	 * chipsets like QCA99X0 and QCA4019. The CCK h/w rate values are
+	 * now in a proper order with respect to the rate/preamble.
+	 */
+	bool cck_rate_map_rev2;
+
+	u32 channel_counters_freq_hz;
+
+	/* Mgmt tx descriptors threshold for limiting probe response
+	 * frames.
+	 */
+	u32 max_probe_resp_desc_thres;
+
+	u32 tx_chain_mask;
+	u32 rx_chain_mask;
+	u32 max_spatial_stream;
+	u32 cal_data_len;
+
+	struct ath10k_hw_params_fw {
+		const char *dir;
+		size_t board_size;
+		size_t ext_board_size;
+		size_t board_ext_size;
+	} fw;
+
+	/* qca99x0 family chips deliver broadcast/multicast management
+	 * frames encrypted and expect the software to do the decryption.
+	 */
+	bool sw_decrypt_mcast_mgmt;
+
+	/* Rx descriptor abstraction */
+	const struct ath10k_htt_rx_desc_ops *rx_desc_ops;
+
+	const struct ath10k_hw_ops *hw_ops;
+
+	/* Number of bytes used for alignment in rx_hdr_status of rx desc. */
+	int decap_align_bytes;
+
+	/* hw specific clock control parameters */
+	const struct ath10k_hw_clk_params *hw_clk;
+	int target_cpu_freq;
+
+	/* Number of bytes to be discarded for each FFT sample */
+	int spectral_bin_discard;
+
+	/* The board may have a restricted NSS for 160 or 80+80 vs what it
+	 * can do for 80 MHz.
+	 */
+	int vht160_mcs_rx_highest;
+	int vht160_mcs_tx_highest;
+
+	/* Number of ciphers supported (i.e. first N) in cipher_suites array */
+	int n_cipher_suites;
+
+	u32 num_peers;
+	u32 ast_skid_limit;
+	u32 num_wds_entries;
+
+	/* Targets supporting physical addressing capability above 32 bits */
+	bool target_64bit;
+
+	/* Target rx ring fill level */
+	u32 rx_ring_fill_level;
+
+	/* target supporting shadow register for ce write */
+	bool shadow_reg_support;
+
+	/* target supporting retention restore on ddr */
+	bool rri_on_ddr;
+
+	/* Number of bytes to be the offset for each FFT sample */
+	int spectral_bin_offset;
+
+	/* targets which require a hw filter reset during boot up
+	 * to avoid sending spurious acks.
+	 */
+	bool hw_filter_reset_required;
+
+	/* target supporting fw download via diag ce */
+	bool fw_diag_ce_download;
+
+	/* target supporting fw download via large size BMI */
+	bool bmi_large_size_download;
+
+	/* need to set the uart pin even if uart printing is disabled;
+	 * workaround for a firmware bug
+	 */
+	bool uart_pin_workaround;
+
+	/* Workaround for the credit size calculation */
+	bool credit_size_workaround;
+
+	/* tx stats support over pktlog */
+	bool tx_stats_over_pktlog;
+
+	/* provides bitrates for sta_statistics using WMI_TLV_PEER_STATS_INFO_EVENTID */
+	bool supports_peer_stats_info;
+
+	bool dynamic_sar_support;
+
+	bool hw_restart_disconnect;
+
+	bool use_fw_tx_credits;
+
+	bool delay_unmap_buffer;
+
+	/* The hardware supports multicast frame registrations */
+	bool mcast_frame_registration;
+};
+
+struct htt_resp;
+struct htt_data_tx_completion_ext;
+struct htt_rx_ring_rx_desc_offsets;
+
+/* Defines needed for Rx descriptor abstraction */
+struct ath10k_hw_ops {
+	void (*set_coverage_class)(struct ath10k *ar, int radio_idx, s16 value);
+	int (*enable_pll_clk)(struct ath10k *ar);
+	int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt);
+	int (*is_rssi_enable)(struct htt_resp *resp);
+};
+
+extern const struct ath10k_hw_ops qca988x_ops;
+extern const struct ath10k_hw_ops qca99x0_ops;
+extern const struct ath10k_hw_ops qca6174_ops;
+extern const struct ath10k_hw_ops qca6174_sdio_ops;
+extern const struct ath10k_hw_ops wcn3990_ops;
+
+extern const struct ath10k_hw_clk_params qca6174_clk[];
+
+static inline int
+ath10k_tx_data_rssi_get_pad_bytes(struct ath10k_hw_params *hw,
+				  struct htt_resp *htt)
+{
+	if (hw->hw_ops->tx_data_rssi_pad_bytes)
+		return hw->hw_ops->tx_data_rssi_pad_bytes(htt);
+	return 0;
+}
+
+static inline int
+ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
+		      struct htt_resp *resp)
+{
+	if (hw->hw_ops->is_rssi_enable)
+		return hw->hw_ops->is_rssi_enable(resp);
+	return 0;
+}
+
+/* Target specific defines for MAIN firmware */
 #define TARGET_NUM_VDEVS 8
 #define TARGET_NUM_PEER_AST 2
 #define TARGET_NUM_WDS_ENTRIES 32
 #define TARGET_DMA_BURST_SIZE 0
 #define TARGET_MAC_AGGR_DELIM 0
 #define TARGET_AST_SKID_LIMIT 16
-#define TARGET_NUM_PEERS 16
+#define TARGET_NUM_STATIONS 16
+#define TARGET_NUM_PEERS 
((TARGET_NUM_STATIONS) + \ + (TARGET_NUM_VDEVS)) #define TARGET_NUM_OFFLOAD_PEERS 0 #define TARGET_NUM_OFFLOAD_REORDER_BUFS 0 #define TARGET_NUM_PEER_KEYS 2 -#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS))) +#define TARGET_NUM_TIDS ((TARGET_NUM_PEERS) * 2) #define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_RX_TIMEOUT_LO_PRI 100 #define TARGET_RX_TIMEOUT_HI_PRI 40 -#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET + #define TARGET_SCAN_MAX_PENDING_REQS 4 #define TARGET_BMISS_OFFLOAD_MAX_VDEV 3 #define TARGET_ROAM_OFFLOAD_MAX_VDEV 3 @@ -90,19 +711,137 @@ enum ath10k_mcast2ucast_mode { #define TARGET_NUM_MSDU_DESC (1024 + 400) #define TARGET_MAX_FRAG_ENTRIES 0 +/* Target specific defines for 10.X firmware */ +#define TARGET_10X_NUM_VDEVS 16 +#define TARGET_10X_NUM_PEER_AST 2 +#define TARGET_10X_NUM_WDS_ENTRIES 32 +#define TARGET_10X_DMA_BURST_SIZE 0 +#define TARGET_10X_MAC_AGGR_DELIM 0 +#define TARGET_10X_AST_SKID_LIMIT 128 +#define TARGET_10X_NUM_STATIONS 128 +#define TARGET_10X_TX_STATS_NUM_STATIONS 118 +#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \ + (TARGET_10X_NUM_VDEVS)) +#define TARGET_10X_TX_STATS_NUM_PEERS ((TARGET_10X_TX_STATS_NUM_STATIONS) + \ + (TARGET_10X_NUM_VDEVS)) +#define TARGET_10X_NUM_OFFLOAD_PEERS 0 +#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0 +#define TARGET_10X_NUM_PEER_KEYS 2 +#define TARGET_10X_NUM_TIDS_MAX 256 +#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \ + (TARGET_10X_NUM_PEERS) * 2) +#define TARGET_10X_TX_STATS_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \ + (TARGET_10X_TX_STATS_NUM_PEERS) * 2) +#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) +#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) +#define TARGET_10X_RX_TIMEOUT_LO_PRI 100 +#define TARGET_10X_RX_TIMEOUT_HI_PRI 40 +#define TARGET_10X_SCAN_MAX_PENDING_REQS 4 +#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2 +#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2 +#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8 +#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3 +#define TARGET_10X_NUM_MCAST_GROUPS 0 +#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0 +#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED +#define TARGET_10X_TX_DBG_LOG_SIZE 1024 +#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 +#define TARGET_10X_VOW_CONFIG 0 +#define TARGET_10X_NUM_MSDU_DESC (1024 + 400) +#define TARGET_10X_MAX_FRAG_ENTRIES 0 -/* Number of Copy Engines supported */ -#define CE_COUNT 8 +/* 10.2 parameters */ +#define TARGET_10_2_DMA_BURST_SIZE 0 -/* - * Total number of PCIe MSI interrupts requested for all interrupt sources. - * PCIe standard forces this to be a power of 2. - * Some Host OS's limit MSI requests that can be granted to 8 - * so for now we abide by this limit and avoid requesting more - * than that. 
- */ -#define MSI_NUM_REQUEST_LOG2 3 -#define MSI_NUM_REQUEST (1<<MSI_NUM_REQUEST_LOG2) +/* Target specific defines for WMI-TLV firmware */ +#define TARGET_TLV_NUM_VDEVS 4 +#define TARGET_TLV_NUM_STATIONS 32 +#define TARGET_TLV_NUM_PEERS 33 +#define TARGET_TLV_NUM_TDLS_VDEVS 1 +#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2) +#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32) +#define TARGET_TLV_NUM_MSDU_DESC_HL 1024 +#define TARGET_TLV_NUM_WOW_PATTERNS 22 +#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50) + +/* Target specific defines for WMI-HL-1.0 firmware */ +#define TARGET_HL_TLV_NUM_PEERS 33 +#define TARGET_HL_TLV_AST_SKID_LIMIT 16 +#define TARGET_HL_TLV_NUM_WDS_ENTRIES 2 + +/* Target specific defines for QCA9377 high latency firmware */ +#define TARGET_QCA9377_HL_NUM_PEERS 15 + +/* Diagnostic Window */ +#define CE_DIAG_PIPE 7 + +#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan + +/* Target specific defines for 10.4 firmware */ +#define TARGET_10_4_NUM_VDEVS 16 +#define TARGET_10_4_NUM_STATIONS 32 +#define TARGET_10_4_NUM_PEERS ((TARGET_10_4_NUM_STATIONS) + \ + (TARGET_10_4_NUM_VDEVS)) +#define TARGET_10_4_ACTIVE_PEERS 0 + +#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512 +#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50 +#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC 35 +#define TARGET_10_4_NUM_OFFLOAD_PEERS 0 +#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0 +#define TARGET_10_4_NUM_PEER_KEYS 2 +#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2) +#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400) +#define TARGET_10_4_NUM_MSDU_DESC_PFC 2500 +#define TARGET_10_4_AST_SKID_LIMIT 32 + +/* 100 ms for video, best-effort, and background */ +#define TARGET_10_4_RX_TIMEOUT_LO_PRI 100 + +/* 40 ms for voice */ +#define TARGET_10_4_RX_TIMEOUT_HI_PRI 40 + +#define TARGET_10_4_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI +#define TARGET_10_4_SCAN_MAX_REQS 4 +#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV 3 +#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV 3 +#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES 8 + +/* Note: mcast to ucast is disabled by default */ +#define TARGET_10_4_NUM_MCAST_GROUPS 0 +#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS 0 +#define TARGET_10_4_MCAST2UCAST_MODE 0 + +#define TARGET_10_4_TX_DBG_LOG_SIZE 1024 +#define TARGET_10_4_NUM_WDS_ENTRIES 32 +#define TARGET_10_4_DMA_BURST_SIZE 1 +#define TARGET_10_4_MAC_AGGR_DELIM 0 +#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 +#define TARGET_10_4_VOW_CONFIG 0 +#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3 +#define TARGET_10_4_11AC_TX_MAX_FRAGS 2 +#define TARGET_10_4_MAX_PEER_EXT_STATS 16 +#define TARGET_10_4_SMART_ANT_CAP 0 +#define TARGET_10_4_BK_MIN_FREE 0 +#define TARGET_10_4_BE_MIN_FREE 0 +#define TARGET_10_4_VI_MIN_FREE 0 +#define TARGET_10_4_VO_MIN_FREE 0 +#define TARGET_10_4_RX_BATCH_MODE 1 +#define TARGET_10_4_THERMAL_THROTTLING_CONFIG 0 +#define TARGET_10_4_ATF_CONFIG 0 +#define TARGET_10_4_IPHDR_PAD_CONFIG 1 +#define TARGET_10_4_QWRAP_CONFIG 0 + +/* TDLS config */ +#define TARGET_10_4_NUM_TDLS_VDEVS 1 +#define TARGET_10_4_NUM_TDLS_BUFFER_STA 1 +#define TARGET_10_4_NUM_TDLS_SLEEP_STA 1 + +/* Maximum number of Copy Engines supported */ +#define CE_COUNT_MAX 12 + +/* Number of Copy Engines supported */ +#define CE_COUNT ar->hw_values->ce_count /* * Granted MSIs are assigned as follows: @@ -116,12 +855,11 @@ enum ath10k_mcast2ucast_mode { /* MSIs for Copy Engines */ #define MSI_ASSIGN_CE_INITIAL 1 -#define MSI_ASSIGN_CE_MAX 7 +#define MSI_ASSIGN_CE_MAX ar->hw_values->msi_assign_ce_max /* as of IP3.7.1 */ -#define 
RTC_STATE_V_ON 3 +#define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on -#define RTC_STATE_COLD_RESET_MASK 0x00000400 #define RTC_STATE_V_LSB 0 #define RTC_STATE_V_MASK 0x00000007 #define RTC_STATE_ADDRESS 0x0000 @@ -130,35 +868,38 @@ enum ath10k_mcast2ucast_mode { #define PCIE_SOC_WAKE_RESET 0x00000000 #define SOC_GLOBAL_RESET_ADDRESS 0x0008 -#define RTC_SOC_BASE_ADDRESS 0x00004000 -#define RTC_WMAC_BASE_ADDRESS 0x00005000 +#define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address +#define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address #define MAC_COEX_BASE_ADDRESS 0x00006000 #define BT_COEX_BASE_ADDRESS 0x00007000 #define SOC_PCIE_BASE_ADDRESS 0x00008000 -#define SOC_CORE_BASE_ADDRESS 0x00009000 +#define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address #define WLAN_UART_BASE_ADDRESS 0x0000c000 #define WLAN_SI_BASE_ADDRESS 0x00010000 #define WLAN_GPIO_BASE_ADDRESS 0x00014000 #define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000 -#define WLAN_MAC_BASE_ADDRESS 0x00020000 +#define WLAN_MAC_BASE_ADDRESS ar->regs->wlan_mac_base_address #define EFUSE_BASE_ADDRESS 0x00030000 #define FPGA_REG_BASE_ADDRESS 0x00039000 #define WLAN_UART2_BASE_ADDRESS 0x00054c00 -#define CE_WRAPPER_BASE_ADDRESS 0x00057000 -#define CE0_BASE_ADDRESS 0x00057400 -#define CE1_BASE_ADDRESS 0x00057800 -#define CE2_BASE_ADDRESS 0x00057c00 -#define CE3_BASE_ADDRESS 0x00058000 -#define CE4_BASE_ADDRESS 0x00058400 -#define CE5_BASE_ADDRESS 0x00058800 -#define CE6_BASE_ADDRESS 0x00058c00 -#define CE7_BASE_ADDRESS 0x00059000 +#define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address +#define CE0_BASE_ADDRESS ar->regs->ce0_base_address +#define CE1_BASE_ADDRESS ar->regs->ce1_base_address +#define CE2_BASE_ADDRESS ar->regs->ce2_base_address +#define CE3_BASE_ADDRESS ar->regs->ce3_base_address +#define CE4_BASE_ADDRESS ar->regs->ce4_base_address +#define CE5_BASE_ADDRESS ar->regs->ce5_base_address +#define CE6_BASE_ADDRESS ar->regs->ce6_base_address +#define CE7_BASE_ADDRESS ar->regs->ce7_base_address #define DBI_BASE_ADDRESS 0x00060000 #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 -#define PCIE_LOCAL_BASE_ADDRESS 0x00080000 +#define PCIE_LOCAL_BASE_ADDRESS ar->regs->pcie_local_base_address +#define SOC_RESET_CONTROL_ADDRESS 0x00000000 #define SOC_RESET_CONTROL_OFFSET 0x00000000 -#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 +#define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask +#define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 #define SOC_CPU_CLOCK_OFFSET 0x00000020 #define SOC_CPU_CLOCK_STANDARD_LSB 0 #define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 @@ -168,6 +909,12 @@ enum ath10k_mcast2ucast_mode { #define SOC_LPO_CAL_OFFSET 0x000000e0 #define SOC_LPO_CAL_ENABLE_LSB 20 #define SOC_LPO_CAL_ENABLE_MASK 0x00100000 +#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 + +#define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address +#define SOC_CHIP_ID_REV_LSB 8 +#define SOC_CHIP_ID_REV_MASK 0x00000f00 #define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 #define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 @@ -175,7 +922,10 @@ enum ath10k_mcast2ucast_mode { #define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 #define WLAN_GPIO_PIN0_ADDRESS 0x00000028 +#define WLAN_GPIO_PIN0_CONFIG_LSB 11 #define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5 +#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060 #define WLAN_GPIO_PIN1_ADDRESS 
0x0000002c #define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 #define WLAN_GPIO_PIN10_ADDRESS 0x00000050 @@ -188,6 +938,8 @@ enum ath10k_mcast2ucast_mode { #define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 #define SI_CONFIG_OFFSET 0x00000000 +#define SI_CONFIG_ERR_INT_LSB 19 +#define SI_CONFIG_ERR_INT_MASK 0x00080000 #define SI_CONFIG_BIDIR_OD_DATA_LSB 18 #define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 #define SI_CONFIG_I2C_LSB 16 @@ -201,7 +953,9 @@ enum ath10k_mcast2ucast_mode { #define SI_CONFIG_DIVIDER_LSB 0 #define SI_CONFIG_DIVIDER_MASK 0x0000000f #define SI_CS_OFFSET 0x00000004 +#define SI_CS_DONE_ERR_LSB 10 #define SI_CS_DONE_ERR_MASK 0x00000400 +#define SI_CS_DONE_INT_LSB 9 #define SI_CS_DONE_INT_MASK 0x00000200 #define SI_CS_START_LSB 8 #define SI_CS_START_MASK 0x00000100 @@ -216,22 +970,31 @@ enum ath10k_mcast2ucast_mode { #define SI_RX_DATA1_OFFSET 0x00000014 #define CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define CORE_CTRL_PCIE_REG_31_MASK 0x00000800 #define CORE_CTRL_ADDRESS 0x0000 #define PCIE_INTR_ENABLE_ADDRESS 0x0008 -#define PCIE_INTR_CLR_ADDRESS 0x0014 -#define SCRATCH_3_ADDRESS 0x0030 +#define PCIE_INTR_CAUSE_ADDRESS 0x000c +#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address +#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address +#define CPU_INTR_ADDRESS 0x0010 +#define FW_RAM_CONFIG_ADDRESS 0x0018 + +#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz) /* Firmware indications to the Host via SCRATCH_3 register. */ -#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS) +#define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address #define FW_IND_EVENT_PENDING 1 #define FW_IND_INITIALIZED 2 +#define FW_IND_HOST_READY 0x80000000 /* HOST_REG interrupt from firmware */ -#define PCIE_INTR_FIRMWARE_MASK 0x00000400 -#define PCIE_INTR_CE_MASK_ALL 0x0007f800 +#define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask +#define PCIE_INTR_CE_MASK_ALL ar->regs->pcie_intr_ce_mask_all #define DRAM_BASE_ADDRESS 0x00400000 +#define PCIE_BAR_REG_ADDRESS 0x40030 + #define MISSING 0 #define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET @@ -244,7 +1007,10 @@ enum ath10k_mcast2ucast_mode { #define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS #define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS #define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB #define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK #define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK #define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS #define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS @@ -299,6 +1065,141 @@ enum ath10k_mcast2ucast_mode { #define WINDOW_READ_ADDR_ADDRESS MISSING #define WINDOW_WRITE_ADDR_ADDRESS MISSING +#define QCA9887_1_0_I2C_SDA_GPIO_PIN 5 +#define QCA9887_1_0_I2C_SDA_PIN_CONFIG 3 +#define QCA9887_1_0_SI_CLK_GPIO_PIN 17 +#define QCA9887_1_0_SI_CLK_PIN_CONFIG 3 +#define QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010 + +#define QCA9887_EEPROM_SELECT_READ 0xa10000a0 +#define QCA9887_EEPROM_ADDR_HI_MASK 0x0000ff00 +#define QCA9887_EEPROM_ADDR_HI_LSB 8 +#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000 +#define QCA9887_EEPROM_ADDR_LO_LSB 16 + +#define MBOX_RESET_CONTROL_ADDRESS 0x00000000 +#define MBOX_HOST_INT_STATUS_ADDRESS 0x00000800 +#define MBOX_HOST_INT_STATUS_ERROR_LSB 7 +#define MBOX_HOST_INT_STATUS_ERROR_MASK 0x00000080 +#define MBOX_HOST_INT_STATUS_CPU_LSB 6 +#define MBOX_HOST_INT_STATUS_CPU_MASK 0x00000040 
+#define MBOX_HOST_INT_STATUS_COUNTER_LSB 4 +#define MBOX_HOST_INT_STATUS_COUNTER_MASK 0x00000010 +#define MBOX_CPU_INT_STATUS_ADDRESS 0x00000801 +#define MBOX_ERROR_INT_STATUS_ADDRESS 0x00000802 +#define MBOX_ERROR_INT_STATUS_WAKEUP_LSB 2 +#define MBOX_ERROR_INT_STATUS_WAKEUP_MASK 0x00000004 +#define MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_LSB 1 +#define MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00000002 +#define MBOX_ERROR_INT_STATUS_TX_OVERFLOW_LSB 0 +#define MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00000001 +#define MBOX_COUNTER_INT_STATUS_ADDRESS 0x00000803 +#define MBOX_COUNTER_INT_STATUS_COUNTER_LSB 0 +#define MBOX_COUNTER_INT_STATUS_COUNTER_MASK 0x000000ff +#define MBOX_RX_LOOKAHEAD_VALID_ADDRESS 0x00000805 +#define MBOX_INT_STATUS_ENABLE_ADDRESS 0x00000828 +#define MBOX_INT_STATUS_ENABLE_ERROR_LSB 7 +#define MBOX_INT_STATUS_ENABLE_ERROR_MASK 0x00000080 +#define MBOX_INT_STATUS_ENABLE_CPU_LSB 6 +#define MBOX_INT_STATUS_ENABLE_CPU_MASK 0x00000040 +#define MBOX_INT_STATUS_ENABLE_INT_LSB 5 +#define MBOX_INT_STATUS_ENABLE_INT_MASK 0x00000020 +#define MBOX_INT_STATUS_ENABLE_COUNTER_LSB 4 +#define MBOX_INT_STATUS_ENABLE_COUNTER_MASK 0x00000010 +#define MBOX_INT_STATUS_ENABLE_MBOX_DATA_LSB 0 +#define MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f +#define MBOX_CPU_INT_STATUS_ENABLE_ADDRESS 0x00000819 +#define MBOX_CPU_INT_STATUS_ENABLE_BIT_LSB 0 +#define MBOX_CPU_INT_STATUS_ENABLE_BIT_MASK 0x000000ff +#define MBOX_CPU_STATUS_ENABLE_ASSERT_MASK 0x00000001 +#define MBOX_ERROR_STATUS_ENABLE_ADDRESS 0x0000081a +#define MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 1 +#define MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00000002 +#define MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 0 +#define MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00000001 +#define MBOX_COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000081b +#define MBOX_COUNTER_INT_STATUS_ENABLE_BIT_LSB 0 +#define MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK 0x000000ff +#define MBOX_COUNT_ADDRESS 0x00000820 +#define MBOX_COUNT_DEC_ADDRESS 0x00000840 +#define MBOX_WINDOW_DATA_ADDRESS 0x00000874 +#define MBOX_WINDOW_WRITE_ADDR_ADDRESS 0x00000878 +#define MBOX_WINDOW_READ_ADDR_ADDRESS 0x0000087c +#define MBOX_CPU_DBG_SEL_ADDRESS 0x00000883 +#define MBOX_CPU_DBG_ADDRESS 0x00000884 +#define MBOX_RTC_BASE_ADDRESS 0x00000000 +#define MBOX_GPIO_BASE_ADDRESS 0x00005000 +#define MBOX_MBOX_BASE_ADDRESS 0x00008000 + #define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) +/* Register definitions for first generation ath10k cards. These cards include + * a mac which has a register allocation similar to ath9k and at least some + * registers including the ones relevant for modifying the coverage class are + * identical to the ath9k definitions. + * These registers are usually managed by the ath10k firmware. However by + * overriding them it is possible to support coverage class modifications. 
+ */ +#define WAVE1_PCU_ACK_CTS_TIMEOUT 0x8014 +#define WAVE1_PCU_ACK_CTS_TIMEOUT_MAX 0x00003FFF +#define WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_MASK 0x00003FFF +#define WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_LSB 0 +#define WAVE1_PCU_ACK_CTS_TIMEOUT_CTS_MASK 0x3FFF0000 +#define WAVE1_PCU_ACK_CTS_TIMEOUT_CTS_LSB 16 + +#define WAVE1_PCU_GBL_IFS_SLOT 0x1070 +#define WAVE1_PCU_GBL_IFS_SLOT_MASK 0x0000FFFF +#define WAVE1_PCU_GBL_IFS_SLOT_MAX 0x0000FFFF +#define WAVE1_PCU_GBL_IFS_SLOT_LSB 0 +#define WAVE1_PCU_GBL_IFS_SLOT_RESV0 0xFFFF0000 + +#define WAVE1_PHYCLK 0x801C +#define WAVE1_PHYCLK_USEC_MASK 0x0000007F +#define WAVE1_PHYCLK_USEC_LSB 0 + +/* qca6174 PLL offset/mask */ +#define SOC_CORE_CLK_CTRL_OFFSET 0x00000114 +#define SOC_CORE_CLK_CTRL_DIV_LSB 0 +#define SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007 + +#define EFUSE_OFFSET 0x0000032c +#define EFUSE_XTAL_SEL_LSB 8 +#define EFUSE_XTAL_SEL_MASK 0x00000700 + +#define BB_PLL_CONFIG_OFFSET 0x000002f4 +#define BB_PLL_CONFIG_FRAC_LSB 0 +#define BB_PLL_CONFIG_FRAC_MASK 0x0003ffff +#define BB_PLL_CONFIG_OUTDIV_LSB 18 +#define BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000 + +#define WLAN_PLL_SETTLE_OFFSET 0x0018 +#define WLAN_PLL_SETTLE_TIME_LSB 0 +#define WLAN_PLL_SETTLE_TIME_MASK 0x000007ff + +#define WLAN_PLL_CONTROL_OFFSET 0x0014 +#define WLAN_PLL_CONTROL_DIV_LSB 0 +#define WLAN_PLL_CONTROL_DIV_MASK 0x000003ff +#define WLAN_PLL_CONTROL_REFDIV_LSB 10 +#define WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00 +#define WLAN_PLL_CONTROL_BYPASS_LSB 16 +#define WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000 +#define WLAN_PLL_CONTROL_NOPWD_LSB 18 +#define WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000 + +#define RTC_SYNC_STATUS_OFFSET 0x0244 +#define RTC_SYNC_STATUS_PLL_CHANGING_LSB 5 +#define RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020 +/* qca6174 PLL offset/mask end */ + +/* CPU_ADDR_MSB is a register, bit[3:0] is to specify which memory + * region is accessed. The memory region size is 1M. + * If host wants to access 0xX12345 at target, then CPU_ADDR_MSB[3:0] + * is 0xX. + * The following MACROs are defined to get the 0xX and the size limit. + */ +#define CPU_ADDR_MSB_REGION_MASK GENMASK(23, 20) +#define CPU_ADDR_MSB_REGION_VAL(X) FIELD_GET(CPU_ADDR_MSB_REGION_MASK, X) +#define REGION_ACCESS_SIZE_LIMIT 0x100000 +#define REGION_ACCESS_SIZE_MASK (REGION_ACCESS_SIZE_LIMIT - 1) + #endif /* _HW_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/leds.c b/drivers/net/wireless/ath/ath10k/leds.c new file mode 100644 index 000000000000..3a6c8111e7c6 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/leds.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 Sebastian Gottschall <s.gottschall@dd-wrt.com> + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ */
+
+#include <linux/leds.h>
+
+#include "core.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+#include "leds.h"
+
+static int ath10k_leds_set_brightness_blocking(struct led_classdev *led_cdev,
+					       enum led_brightness brightness)
+{
+	struct ath10k *ar = container_of(led_cdev, struct ath10k,
+					 leds.cdev);
+	struct gpio_led *led = &ar->leds.wifi_led;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON)
+		goto out;
+
+	ar->leds.gpio_state_pin = (brightness != LED_OFF) ^ led->active_low;
+	ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, ar->leds.gpio_state_pin);
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+int ath10k_leds_start(struct ath10k *ar)
+{
+	if (ar->hw_params.led_pin == 0)
+		/* leds not supported */
+		return 0;
+
+	/* Under some circumstances the firmware reconfigures the gpio
+	 * pin back to its default state, so we need to reconfigure it.
+	 * This behaviour has only been seen on QCA9984 and QCA99XX
+	 * devices so far.
+	 */
+	ath10k_wmi_gpio_config(ar, ar->hw_params.led_pin, 0,
+			       WMI_GPIO_PULL_NONE, WMI_GPIO_INTTYPE_DISABLE);
+	ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, 1);
+
+	return 0;
+}
+
+int ath10k_leds_register(struct ath10k *ar)
+{
+	int ret;
+
+	if (ar->hw_params.led_pin == 0)
+		/* leds not supported */
+		return 0;
+
+	snprintf(ar->leds.label, sizeof(ar->leds.label), "ath10k-%s",
+		 wiphy_name(ar->hw->wiphy));
+	ar->leds.wifi_led.active_low = 1;
+	ar->leds.wifi_led.name = ar->leds.label;
+	ar->leds.wifi_led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+
+	ar->leds.cdev.name = ar->leds.label;
+	ar->leds.cdev.brightness_set_blocking = ath10k_leds_set_brightness_blocking;
+	ar->leds.cdev.default_trigger = ar->leds.wifi_led.default_trigger;
+
+	ret = led_classdev_register(wiphy_dev(ar->hw->wiphy), &ar->leds.cdev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void ath10k_leds_unregister(struct ath10k *ar)
+{
+	if (ar->hw_params.led_pin == 0)
+		/* leds not supported */
+		return;
+
+	led_classdev_unregister(&ar->leds.cdev);
+}
+
diff --git a/drivers/net/wireless/ath/ath10k/leds.h b/drivers/net/wireless/ath/ath10k/leds.h new file mode 100644 index 000000000000..56325b0875e5 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/leds.h @@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 Sebastian Gottschall <s.gottschall@dd-wrt.com>
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LEDS_H_
+#define _LEDS_H_
+
+#include "core.h"
+
+#ifdef CONFIG_ATH10K_LEDS
+void ath10k_leds_unregister(struct ath10k *ar);
+int ath10k_leds_start(struct ath10k *ar);
+int ath10k_leds_register(struct ath10k *ar);
+#else
+static inline void ath10k_leds_unregister(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_leds_start(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline int ath10k_leds_register(struct ath10k *ar)
+{
+	return 0;
+}
+
+#endif
+#endif /* _LEDS_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index da5c333d0d4b..da6f7957a0ae 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1,30 +1,236 @@
+// SPDX-License-Identifier: ISC
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include "mac.h" +#include <linux/export.h> +#include <net/cfg80211.h> #include <net/mac80211.h> #include <linux/etherdevice.h> +#include <linux/acpi.h> +#include <linux/of.h> +#include <linux/bitfield.h> +#include <linux/random.h> +#include "hif.h" #include "core.h" #include "debug.h" #include "wmi.h" #include "htt.h" #include "txrx.h" +#include "testmode.h" +#include "wmi-tlv.h" +#include "wmi-ops.h" +#include "wow.h" +#include "leds.h" + +/*********/ +/* Rates */ +/*********/ + +static struct ieee80211_rate ath10k_rates[] = { + { .bitrate = 10, + .hw_value = ATH10K_HW_RATE_CCK_LP_1M }, + { .bitrate = 20, + .hw_value = ATH10K_HW_RATE_CCK_LP_2M, + .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M, + .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = ATH10K_HW_RATE_CCK_LP_11M, + .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + + { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, + { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, + { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, + { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, + { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, + { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, + { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, + { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, +}; + +static struct ieee80211_rate ath10k_rates_rev2[] = { + { .bitrate = 10, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M }, + { .bitrate = 20, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + + { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, + { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, + { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, + { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, + { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, + { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, + { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, + { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, +}; + +static 
const struct cfg80211_sar_freq_ranges ath10k_sar_freq_ranges[] = { + {.start_freq = 2402, .end_freq = 2494 }, + {.start_freq = 5170, .end_freq = 5875 }, +}; + +static const struct cfg80211_sar_capa ath10k_sar_capa = { + .type = NL80211_SAR_TYPE_POWER, + .num_freq_ranges = (ARRAY_SIZE(ath10k_sar_freq_ranges)), + .freq_ranges = &ath10k_sar_freq_ranges[0], +}; + +#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4 + +#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX) +#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \ + ATH10K_MAC_FIRST_OFDM_RATE_IDX) +#define ath10k_g_rates (ath10k_rates + 0) +#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) + +#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) +#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) + +#define ath10k_wmi_legacy_rates ath10k_rates + +static bool ath10k_mac_bitrate_is_cck(int bitrate) +{ + switch (bitrate) { + case 10: + case 20: + case 55: + case 110: + return true; + } + + return false; +} + +static u8 ath10k_mac_bitrate_to_rate(int bitrate) +{ + return DIV_ROUND_UP(bitrate, 5) | + (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0); +} + +u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, + u8 hw_rate, bool cck) +{ + const struct ieee80211_rate *rate; + int i; + + for (i = 0; i < sband->n_bitrates; i++) { + rate = &sband->bitrates[i]; + + if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck) + continue; + + if (rate->hw_value == hw_rate) + return i; + else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && + rate->hw_value_short == hw_rate) + return i; + } + + return 0; +} + +u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, + u32 bitrate) +{ + int i; + + for (i = 0; i < sband->n_bitrates; i++) + if (sband->bitrates[i].bitrate == bitrate) + return i; + + return 0; +} + +static int ath10k_mac_get_rate_hw_value(int bitrate) +{ + int i; + u8 hw_value_prefix = 0; + + if (ath10k_mac_bitrate_is_cck(bitrate)) + hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6; + + for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) { + if (ath10k_rates[i].bitrate == bitrate) + return hw_value_prefix | ath10k_rates[i].hw_value; + } + + return -EINVAL; +} + +static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) +{ + switch ((mcs_map >> (2 * nss)) & 0x3) { + case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; + case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; + case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; + } + return 0; +} + +static u32 +ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) +{ + int nss; + + for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) + if (ht_mcs_mask[nss]) + return nss + 1; + + return 1; +} + +static u32 +ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) +{ + int nss; + + for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) + if (vht_mcs_mask[nss]) + return nss + 1; + + return 1; +} + +int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val) +{ + enum wmi_host_platform_type platform_type; + int ret; + + if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map)) + platform_type = WMI_HOST_PLATFORM_LOW_PERF; + else + platform_type = WMI_HOST_PLATFORM_HIGH_PERF; + + ret = ath10k_wmi_ext_resource_config(ar, platform_type, val); + + if (ret && ret != -EOPNOTSUPP) { + ath10k_warn(ar, "failed to configure ext resource: %d\n", ret); + return ret; + } + + return 0; +} /**********/ /* Crypto */ @@ -33,47 +239,66 @@ static int ath10k_send_key(struct ath10k_vif *arvif, struct 
ieee80211_key_conf *key, enum set_key_cmd cmd, - const u8 *macaddr) + const u8 *macaddr, u32 flags) { + struct ath10k *ar = arvif->ar; struct wmi_vdev_install_key_arg arg = { .vdev_id = arvif->vdev_id, .key_idx = key->keyidx, .key_len = key->keylen, .key_data = key->key, + .key_flags = flags, .macaddr = macaddr, }; - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) - arg.key_flags = WMI_KEY_PAIRWISE; - else - arg.key_flags = WMI_KEY_GROUP; + lockdep_assert_held(&arvif->ar->conf_mutex); switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: - arg.key_cipher = WMI_CIPHER_AES_CCM; - key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM]; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; case WLAN_CIPHER_SUITE_TKIP: - arg.key_cipher = WMI_CIPHER_TKIP; + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_TKIP]; arg.key_txmic_len = 8; arg.key_rxmic_len = 8; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: - arg.key_cipher = WMI_CIPHER_WEP; - /* AP/IBSS mode requires self-key to be groupwise - * Otherwise pairwise key must be set */ - if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN)) - arg.key_flags = WMI_KEY_PAIRWISE; + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_WEP]; + break; + case WLAN_CIPHER_SUITE_CCMP_256: + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM]; + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM]; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + WARN_ON(1); + return -EINVAL; default: - ath10k_warn("cipher %d is not supported\n", key->cipher); + ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); return -EOPNOTSUPP; } + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + if (cmd == DISABLE_KEY) { - arg.key_cipher = WMI_CIPHER_NONE; - arg.key_data = NULL; + if (flags & WMI_KEY_GROUP) { + /* Not all hardware handles group-key deletion operation + * correctly. Replace the key with a junk value to invalidate it. 
+ */ + get_random_bytes(key->key, key->keylen); + } else { + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE]; + arg.key_data = NULL; + } } return ath10k_wmi_vdev_install_key(arvif->ar, &arg); @@ -82,19 +307,25 @@ static int ath10k_send_key(struct ath10k_vif *arvif, static int ath10k_install_key(struct ath10k_vif *arvif, struct ieee80211_key_conf *key, enum set_key_cmd cmd, - const u8 *macaddr) + const u8 *macaddr, u32 flags) { struct ath10k *ar = arvif->ar; int ret; + unsigned long time_left; + + lockdep_assert_held(&ar->conf_mutex); - INIT_COMPLETION(ar->install_key_done); + reinit_completion(&ar->install_key_done); - ret = ath10k_send_key(arvif, key, cmd, macaddr); + if (arvif->nohwcrypt) + return 1; + + ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); if (ret) return ret; - ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ); - if (ret == 0) + time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ); + if (time_left == 0) return -ETIMEDOUT; return 0; @@ -107,9 +338,15 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, struct ath10k_peer *peer; int ret; int i; + u32 flags; lockdep_assert_held(&ar->conf_mutex); + if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && + arvif->vif->type != NL80211_IFTYPE_ADHOC && + arvif->vif->type != NL80211_IFTYPE_MESH_POINT)) + return -EINVAL; + spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find(ar, arvif->vdev_id, addr); spin_unlock_bh(&ar->data_lock); @@ -121,12 +358,62 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, if (arvif->wep_keys[i] == NULL) continue; - ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY, - addr); - if (ret) - return ret; + switch (arvif->vif->type) { + case NL80211_IFTYPE_AP: + flags = WMI_KEY_PAIRWISE; + + if (arvif->def_wep_key_idx == i) + flags |= WMI_KEY_TX_USAGE; + + ret = ath10k_install_key(arvif, arvif->wep_keys[i], + SET_KEY, addr, flags); + if (ret < 0) + return ret; + break; + case NL80211_IFTYPE_ADHOC: + ret = ath10k_install_key(arvif, arvif->wep_keys[i], + SET_KEY, addr, + WMI_KEY_PAIRWISE); + if (ret < 0) + return ret; + + ret = ath10k_install_key(arvif, arvif->wep_keys[i], + SET_KEY, addr, WMI_KEY_GROUP); + if (ret < 0) + return ret; + break; + default: + WARN_ON(1); + return -EINVAL; + } + spin_lock_bh(&ar->data_lock); peer->keys[i] = arvif->wep_keys[i]; + spin_unlock_bh(&ar->data_lock); + } + + /* In some cases (notably with static WEP IBSS with multiple keys) + * multicast Tx becomes broken. Both pairwise and groupwise keys are + * installed already. Using WMI_KEY_TX_USAGE in different combinations + * didn't seem to help. Using def_keyid vdev parameter seems to be + * effective so use that. + * + * FIXME: Revisit. Perhaps this can be done in a less hacky way.
+ */ + if (arvif->vif->type != NL80211_IFTYPE_ADHOC) + return 0; + + if (arvif->def_wep_key_idx == -1) + return 0; + + ret = ath10k_wmi_vdev_set_param(arvif->ar, + arvif->vdev_id, + arvif->ar->wmi.vdev_param->def_keyid, + arvif->def_wep_key_idx); + if (ret) { + ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; } return 0; @@ -140,6 +427,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, int first_errno = 0; int ret; int i; + u32 flags = 0; lockdep_assert_held(&ar->conf_mutex); @@ -154,21 +442,49 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, if (peer->keys[i] == NULL) continue; + /* key flags are not required to delete the key */ ret = ath10k_install_key(arvif, peer->keys[i], - DISABLE_KEY, addr); - if (ret && first_errno == 0) + DISABLE_KEY, addr, flags); + if (ret < 0 && first_errno == 0) first_errno = ret; - if (ret) - ath10k_warn("could not remove peer wep key %d (%d)\n", + if (ret < 0) + ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", i, ret); + spin_lock_bh(&ar->data_lock); peer->keys[i] = NULL; + spin_unlock_bh(&ar->data_lock); } return first_errno; } +bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, + u8 keyidx) +{ + struct ath10k_peer *peer; + int i; + + lockdep_assert_held(&ar->data_lock); + + /* We don't know which vdev this peer belongs to, + * since WMI doesn't give us that information. + * + * FIXME: multi-bss needs to be handled. + */ + peer = ath10k_peer_find(ar, 0, addr); + if (!peer) + return false; + + for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { + if (peer->keys[i] && peer->keys[i]->keyidx == keyidx) + return true; + } + + return false; +} + static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, struct ieee80211_key_conf *key) { @@ -178,18 +494,20 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, int first_errno = 0; int ret; int i; + u32 flags = 0; lockdep_assert_held(&ar->conf_mutex); for (;;) { /* since ath10k_install_key we can't hold data_lock all the - * time, so we try to remove the keys incrementally */ + * time, so we try to remove the keys incrementally + */ spin_lock_bh(&ar->data_lock); i = 0; list_for_each_entry(peer, &ar->peers, list) { for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { if (peer->keys[i] == key) { - memcpy(addr, peer->addr, ETH_ALEN); + ether_addr_copy(addr, peer->addr); peer->keys[i] = NULL; break; } @@ -202,18 +520,51 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, if (i == ARRAY_SIZE(peer->keys)) break; - - ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr); - if (ret && first_errno == 0) + /* key flags are not required to delete the key */ + ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags); + if (ret < 0 && first_errno == 0) first_errno = ret; if (ret) - ath10k_warn("could not remove key for %pM\n", addr); + ath10k_warn(ar, "failed to remove key for %pM: %d\n", + addr, ret); } return first_errno; } +static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif, + struct ieee80211_key_conf *key) +{ + struct ath10k *ar = arvif->ar; + struct ath10k_peer *peer; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(peer, &ar->peers, list) { + if (ether_addr_equal(peer->addr, arvif->vif->addr)) + continue; + + if (ether_addr_equal(peer->addr, arvif->bssid)) + continue; + + if (peer->keys[key->keyidx] == key) + continue; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n", + arvif->vdev_id, key->keyidx); + + ret =
ath10k_install_peer_wep_keys(arvif, peer->addr); + if (ret) { + ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n", + arvif->vdev_id, peer->addr, ret); + return ret; + } + } + + return 0; +} /*********************/ /* General utilities */ @@ -225,10 +576,13 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) enum wmi_phy_mode phymode = MODE_UNKNOWN; switch (chandef->chan->band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: - phymode = MODE_11G; + if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) + phymode = MODE_11B; + else + phymode = MODE_11G; break; case NL80211_CHAN_WIDTH_20: phymode = MODE_11NG_HT20; @@ -236,16 +590,12 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) case NL80211_CHAN_WIDTH_40: phymode = MODE_11NG_HT40; break; - case NL80211_CHAN_WIDTH_5: - case NL80211_CHAN_WIDTH_10: - case NL80211_CHAN_WIDTH_80: - case NL80211_CHAN_WIDTH_80P80: - case NL80211_CHAN_WIDTH_160: + default: phymode = MODE_UNKNOWN; break; } break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: phymode = MODE_11A; @@ -259,10 +609,13 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) case NL80211_CHAN_WIDTH_80: phymode = MODE_11AC_VHT80; break; - case NL80211_CHAN_WIDTH_5: - case NL80211_CHAN_WIDTH_10: - case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: + phymode = MODE_11AC_VHT160; + break; + case NL80211_CHAN_WIDTH_80P80: + phymode = MODE_11AC_VHT80_80; + break; + default: phymode = MODE_UNKNOWN; break; } @@ -295,7 +648,8 @@ static u8 ath10k_parse_mpdudensity(u8 mpdudensity) case 2: case 3: /* Our lower layer calculations limit our precision to - 1 microsecond */ + * 1 microsecond + */ return 1; case 4: return 2; @@ -310,23 +664,179 @@ static u8 ath10k_parse_mpdudensity(u8 mpdudensity) } } -static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) +int ath10k_mac_vif_chan(struct ieee80211_vif *vif, + struct cfg80211_chan_def *def) { + struct ieee80211_chanctx_conf *conf; + + rcu_read_lock(); + conf = rcu_dereference(vif->bss_conf.chanctx_conf); + if (!conf) { + rcu_read_unlock(); + return -ENOENT; + } + + *def = conf->def; + rcu_read_unlock(); + + return 0; +} + +static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *conf, + void *data) +{ + int *num = data; + + (*num)++; +} + +static int ath10k_mac_num_chanctxs(struct ath10k *ar) +{ + int num = 0; + + ieee80211_iter_chan_contexts_atomic(ar->hw, + ath10k_mac_num_chanctxs_iter, + &num); + + return num; +} + +static void +ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *conf, + void *data) +{ + struct cfg80211_chan_def **def = data; + + *def = &conf->def; +} + +static void ath10k_wait_for_peer_delete_done(struct ath10k *ar, u32 vdev_id, + const u8 *addr) +{ + unsigned long time_left; + int ret; + + if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) { + ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); + if (ret) { + ath10k_warn(ar, "failed to wait for peer deletion\n"); + return; + } + + time_left = wait_for_completion_timeout(&ar->peer_delete_done, + 5 * HZ); + if (!time_left) + ath10k_warn(ar, "Timeout in receiving peer delete response\n"); + } +} + +static int ath10k_peer_create(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 vdev_id, + const u8 *addr, + enum wmi_peer_type peer_type) +{ + struct ath10k_peer *peer;
int ret; lockdep_assert_held(&ar->conf_mutex); - ret = ath10k_wmi_peer_create(ar, vdev_id, addr); - if (ret) + /* Each vdev consumes a peer entry as well. */ + if (ar->num_peers + list_count_nodes(&ar->arvifs) >= ar->max_num_peers) + return -ENOBUFS; + + ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); + if (ret) { + ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", + addr, vdev_id, ret); return ret; + } ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); - if (ret) + if (ret) { + ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", + addr, vdev_id, ret); + return ret; + } + + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find(ar, vdev_id, addr); + if (!peer) { + spin_unlock_bh(&ar->data_lock); + ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", + addr, vdev_id); + ath10k_wait_for_peer_delete_done(ar, vdev_id, addr); + return -ENOENT; + } + + peer->vif = vif; + peer->sta = sta; + + spin_unlock_bh(&ar->data_lock); + + ar->num_peers++; + + return 0; +} + +static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 param; + int ret; + + param = ar->wmi.pdev_param->sta_kickout_th; + ret = ath10k_wmi_pdev_set_param(ar, param, + ATH10K_KICKOUT_THRESHOLD); + if (ret) { + ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, + ATH10K_KEEPALIVE_MIN_IDLE); + if (ret) { + ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, + ATH10K_KEEPALIVE_MAX_IDLE); + if (ret) { + ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", + arvif->vdev_id, ret); return ret; + } + + param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, + ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); + if (ret) { + ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } return 0; } +static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) +{ + struct ath10k *ar = arvif->ar; + u32 vdev_param; + + vdev_param = ar->wmi.vdev_param->rts_threshold; + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); +} + static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) { int ret; @@ -341,9 +851,50 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) if (ret) return ret; + if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) { + unsigned long time_left; + + time_left = wait_for_completion_timeout + (&ar->peer_delete_done, 5 * HZ); + + if (!time_left) { + ath10k_warn(ar, "Timeout in receiving peer delete response\n"); + return -ETIMEDOUT; + } + } + + ar->num_peers--; + return 0; } +static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer) +{ + int peer_id, i; + + lockdep_assert_held(&ar->conf_mutex); + + for_each_set_bit(peer_id, peer->peer_ids, + ATH10K_MAX_NUM_PEER_IDS) { + ar->peer_map[peer_id] = NULL; + } + + /* Double check that peer is properly un-referenced from + * the peer_map + */ + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + if (ar->peer_map[i] == peer) { + 
ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n", + peer->addr, peer, i); + ar->peer_map[i] = NULL; + } + } + + list_del(&peer->list); + kfree(peer); + ar->num_peers--; +} + static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) { struct ath10k_peer *peer, *tmp; @@ -355,417 +906,1326 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) if (peer->vdev_id != vdev_id) continue; - ath10k_warn("removing stale peer %pM from vdev_id %d\n", + ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", peer->addr, vdev_id); + ath10k_peer_map_cleanup(ar, peer); + } + spin_unlock_bh(&ar->data_lock); +} + +static void ath10k_peer_cleanup_all(struct ath10k *ar) +{ + struct ath10k_peer *peer, *tmp; + int i; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + list_for_each_entry_safe(peer, tmp, &ar->peers, list) { list_del(&peer->list); kfree(peer); } + + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) + ar->peer_map[i] = NULL; + spin_unlock_bh(&ar->data_lock); -} -/************************/ -/* Interface management */ -/************************/ + ar->num_peers = 0; + ar->num_stations = 0; +} -static inline int ath10k_vdev_setup_sync(struct ath10k *ar) +static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id, + struct ieee80211_sta *sta, + enum wmi_tdls_peer_state state) { int ret; + struct wmi_tdls_peer_update_cmd_arg arg = {}; + struct wmi_tdls_peer_capab_arg cap = {}; + struct wmi_channel_arg chan_arg = {}; - ret = wait_for_completion_timeout(&ar->vdev_setup_done, - ATH10K_VDEV_SETUP_TIMEOUT_HZ); - if (ret == 0) - return -ETIMEDOUT; + lockdep_assert_held(&ar->conf_mutex); + + arg.vdev_id = vdev_id; + arg.peer_state = state; + ether_addr_copy(arg.addr, sta->addr); + + cap.peer_max_sp = sta->max_sp; + cap.peer_uapsd_queues = sta->uapsd_queues; + + if (state == WMI_TDLS_PEER_STATE_CONNECTED && + !sta->tdls_initiator) + cap.is_peer_responder = 1; + + ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg); + if (ret) { + ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n", + arg.addr, vdev_id, ret); + return ret; + } return 0; } -static int ath10k_vdev_start(struct ath10k_vif *arvif) +/************************/ +/* Interface management */ +/************************/ + +void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) { struct ath10k *ar = arvif->ar; - struct ieee80211_conf *conf = &ar->hw->conf; - struct ieee80211_channel *channel = conf->chandef.chan; - struct wmi_vdev_start_request_arg arg = {}; - int ret = 0; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_held(&ar->data_lock); - INIT_COMPLETION(ar->vdev_setup_done); + if (!arvif->beacon) + return; - arg.vdev_id = arvif->vdev_id; - arg.dtim_period = arvif->dtim_period; - arg.bcn_intval = arvif->beacon_interval; + if (!arvif->beacon_buf) + dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, + arvif->beacon->len, DMA_TO_DEVICE); - arg.channel.freq = channel->center_freq; + if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED && + arvif->beacon_state != ATH10K_BEACON_SENT)) + return; - arg.channel.band_center_freq1 = conf->chandef.center_freq1; + dev_kfree_skb_any(arvif->beacon); - arg.channel.mode = chan_to_phymode(&conf->chandef); + arvif->beacon = NULL; + arvif->beacon_state = ATH10K_BEACON_SCHEDULED; +} - arg.channel.min_power = channel->max_power * 3; - arg.channel.max_power = channel->max_power * 4; - arg.channel.max_reg_power = channel->max_reg_power * 4; - arg.channel.max_antenna_gain = 
channel->max_antenna_gain; +static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { - arg.ssid = arvif->u.ap.ssid; - arg.ssid_len = arvif->u.ap.ssid_len; - arg.hidden_ssid = arvif->u.ap.hidden_ssid; - } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { - arg.ssid = arvif->vif->bss_conf.ssid; - arg.ssid_len = arvif->vif->bss_conf.ssid_len; - } + lockdep_assert_held(&ar->data_lock); - ret = ath10k_wmi_vdev_start(ar, &arg); - if (ret) { - ath10k_warn("WMI vdev start failed: ret %d\n", ret); - return ret; - } + ath10k_mac_vif_beacon_free(arvif); - ret = ath10k_vdev_setup_sync(ar); - if (ret) { - ath10k_warn("vdev setup failed %d\n", ret); - return ret; + if (arvif->beacon_buf) { + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) + kfree(arvif->beacon_buf); + else + dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, + arvif->beacon_buf, + arvif->beacon_paddr); + arvif->beacon_buf = NULL; } +} - return ret; +static inline int ath10k_vdev_setup_sync(struct ath10k *ar) +{ + unsigned long time_left; + + lockdep_assert_held(&ar->conf_mutex); + + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + return -ESHUTDOWN; + + time_left = wait_for_completion_timeout(&ar->vdev_setup_done, + ATH10K_VDEV_SETUP_TIMEOUT_HZ); + if (time_left == 0) + return -ETIMEDOUT; + + return ar->last_wmi_vdev_start_status; } -static int ath10k_vdev_stop(struct ath10k_vif *arvif) +static inline int ath10k_vdev_delete_sync(struct ath10k *ar) { - struct ath10k *ar = arvif->ar; - int ret; + unsigned long time_left; lockdep_assert_held(&ar->conf_mutex); - INIT_COMPLETION(ar->vdev_setup_done); + if (!test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) + return 0; - ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); - if (ret) { - ath10k_warn("WMI vdev stop failed: ret %d\n", ret); - return ret; - } + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + return -ESHUTDOWN; - ret = ath10k_vdev_setup_sync(ar); - if (ret) { - ath10k_warn("vdev setup failed %d\n", ret); - return ret; - } + time_left = wait_for_completion_timeout(&ar->vdev_delete_done, + ATH10K_VDEV_DELETE_TIMEOUT_HZ); + if (time_left == 0) + return -ETIMEDOUT; - return ret; + return 0; } -static int ath10k_monitor_start(struct ath10k *ar, int vdev_id) +static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) { - struct ieee80211_channel *channel = ar->hw->conf.chandef.chan; + struct cfg80211_chan_def *chandef = NULL; + struct ieee80211_channel *channel = NULL; struct wmi_vdev_start_request_arg arg = {}; - enum nl80211_channel_type type; int ret = 0; lockdep_assert_held(&ar->conf_mutex); - type = cfg80211_get_chandef_type(&ar->hw->conf.chandef); + ieee80211_iter_chan_contexts_atomic(ar->hw, + ath10k_mac_get_any_chandef_iter, + &chandef); + if (WARN_ON_ONCE(!chandef)) + return -ENOENT; + + channel = chandef->chan; arg.vdev_id = vdev_id; arg.channel.freq = channel->center_freq; - arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1; + arg.channel.band_center_freq1 = chandef->center_freq1; + arg.channel.band_center_freq2 = chandef->center_freq2; /* TODO setup this dynamically, what in case we - don't have any vifs? */ - arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef); + * don't have any vifs? 
+ */ + arg.channel.mode = chan_to_phymode(chandef); + arg.channel.chan_radar = + !!(channel->flags & IEEE80211_CHAN_RADAR); - arg.channel.min_power = channel->max_power * 3; - arg.channel.max_power = channel->max_power * 4; - arg.channel.max_reg_power = channel->max_reg_power * 4; + arg.channel.min_power = 0; + arg.channel.max_power = channel->max_power * 2; + arg.channel.max_reg_power = channel->max_reg_power * 2; arg.channel.max_antenna_gain = channel->max_antenna_gain; + reinit_completion(&ar->vdev_setup_done); + reinit_completion(&ar->vdev_delete_done); + ret = ath10k_wmi_vdev_start(ar, &arg); if (ret) { - ath10k_warn("Monitor vdev start failed: ret %d\n", ret); + ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", + vdev_id, ret); return ret; } ret = ath10k_vdev_setup_sync(ar); if (ret) { - ath10k_warn("Monitor vdev setup failed %d\n", ret); + ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n", + vdev_id, ret); return ret; } ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); if (ret) { - ath10k_warn("Monitor vdev up failed: %d\n", ret); + ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n", + vdev_id, ret); goto vdev_stop; } ar->monitor_vdev_id = vdev_id; - ar->monitor_enabled = true; + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n", + ar->monitor_vdev_id); return 0; vdev_stop: ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) - ath10k_warn("Monitor vdev stop failed: %d\n", ret); + ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n", + ar->monitor_vdev_id, ret); return ret; } -static int ath10k_monitor_stop(struct ath10k *ar) +static int ath10k_monitor_vdev_stop(struct ath10k *ar) { int ret = 0; lockdep_assert_held(&ar->conf_mutex); - /* For some reasons, ath10k_wmi_vdev_down() here couse - * often ath10k_wmi_vdev_stop() to fail. Next we could - * not run monitor vdev and driver reload - * required. Don't see such problems we skip - * ath10k_wmi_vdev_down() here. 
- */ + ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); + if (ret) + ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", + ar->monitor_vdev_id, ret); + + reinit_completion(&ar->vdev_setup_done); + reinit_completion(&ar->vdev_delete_done); ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) - ath10k_warn("Monitor vdev stop failed: %d\n", ret); + ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n", + ar->monitor_vdev_id, ret); ret = ath10k_vdev_setup_sync(ar); if (ret) - ath10k_warn("Monitor_down sync failed: %d\n", ret); + ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n", + ar->monitor_vdev_id, ret); - ar->monitor_enabled = false; + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", + ar->monitor_vdev_id); return ret; } -static int ath10k_monitor_create(struct ath10k *ar) +static int ath10k_monitor_vdev_create(struct ath10k *ar) { int bit, ret = 0; lockdep_assert_held(&ar->conf_mutex); - if (ar->monitor_present) { - ath10k_warn("Monitor mode already enabled\n"); - return 0; - } - - bit = ffs(ar->free_vdev_map); - if (bit == 0) { - ath10k_warn("No free VDEV slots\n"); + if (ar->free_vdev_map == 0) { + ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n"); return -ENOMEM; } - ar->monitor_vdev_id = bit - 1; - ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id); + bit = __ffs64(ar->free_vdev_map); + + ar->monitor_vdev_id = bit; ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, WMI_VDEV_TYPE_MONITOR, 0, ar->mac_addr); if (ret) { - ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret); - goto vdev_fail; + ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", + ar->monitor_vdev_id, ret); + return ret; } - ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n", + ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", ar->monitor_vdev_id); - ar->monitor_present = true; return 0; +} -vdev_fail: - /* - * Restore the ID to the global map. 
+static int ath10k_monitor_vdev_delete(struct ath10k *ar) +{ + int ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); + if (ret) { + ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", + ar->monitor_vdev_id, ret); + return ret; + } + + ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", + ar->monitor_vdev_id); + return ret; +} + +static int ath10k_monitor_start(struct ath10k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ret = ath10k_monitor_vdev_create(ar); + if (ret) { + ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); + return ret; + } + + ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); + if (ret) { + ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); + ath10k_monitor_vdev_delete(ar); + return ret; + } + + ar->monitor_started = true; + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); + + return 0; +} + +static int ath10k_monitor_stop(struct ath10k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ret = ath10k_monitor_vdev_stop(ar); + if (ret) { + ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); + return ret; + } + + ret = ath10k_monitor_vdev_delete(ar); + if (ret) { + ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); + return ret; + } + + ar->monitor_started = false; + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); + + return 0; +} + +static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar) +{ + int num_ctx; + + /* At least one chanctx is required to derive a channel to start + * monitor vdev on. + */ + num_ctx = ath10k_mac_num_chanctxs(ar); + if (num_ctx == 0) + return false; + + /* If there's already an existing special monitor interface then don't + * bother creating another monitor vdev. + */ + if (ar->monitor_arvif) + return false; + + return ar->monitor || + (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST, + ar->running_fw->fw_file.fw_features) && + (ar->filter_flags & (FIF_OTHER_BSS | FIF_MCAST_ACTION))) || + test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); +} + +static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar) +{ + int num_ctx; + + num_ctx = ath10k_mac_num_chanctxs(ar); + + /* FIXME: Current interface combinations and cfg80211/mac80211 code + * shouldn't allow this but make sure to prevent handling the following + * case anyway since multi-channel DFS hasn't been tested at all. */ - ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); + if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1) + return false; + + return true; +} + +static int ath10k_monitor_recalc(struct ath10k *ar) +{ + bool needed; + bool allowed; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + needed = ath10k_mac_monitor_vdev_is_needed(ar); + allowed = ath10k_mac_monitor_vdev_is_allowed(ar); + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac monitor recalc started? %d needed? %d allowed? 
%d\n", + ar->monitor_started, needed, allowed); + + if (WARN_ON(needed && !allowed)) { + if (ar->monitor_started) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n"); + + ret = ath10k_monitor_stop(ar); + if (ret) + ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", + ret); + /* not serious */ + } + + return -EPERM; + } + + if (needed == ar->monitor_started) + return 0; + + if (needed) + return ath10k_monitor_start(ar); + else + return ath10k_monitor_stop(ar); +} + +static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (!arvif->is_started) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); + return false; + } + + return true; +} + +static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 vdev_param; + + lockdep_assert_held(&ar->conf_mutex); + + vdev_param = ar->wmi.vdev_param->protection_mode; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", + arvif->vdev_id, arvif->use_cts_prot); + + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + arvif->use_cts_prot ? 1 : 0); +} + +static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 vdev_param, rts_cts = 0; + + lockdep_assert_held(&ar->conf_mutex); + + vdev_param = ar->wmi.vdev_param->enable_rtscts; + + rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET); + + if (arvif->num_legacy_stations > 0) + rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, + WMI_RTSCTS_PROFILE); + else + rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, + WMI_RTSCTS_PROFILE); + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", + arvif->vdev_id, rts_cts); + + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + rts_cts); +} + +static int ath10k_start_cac(struct ath10k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); + + ret = ath10k_monitor_recalc(ar); + if (ret) { + ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); + clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", + ar->monitor_vdev_id); + + return 0; +} + +static int ath10k_stop_cac(struct ath10k *ar) +{ + lockdep_assert_held(&ar->conf_mutex); + + /* CAC is not running - do nothing */ + if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) + return 0; + + clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); + ath10k_monitor_stop(ar); + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); + + return 0; +} + +static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *conf, + void *data) +{ + bool *ret = data; + + if (!*ret && conf->radar_enabled) + *ret = true; +} + +static bool ath10k_mac_has_radar_enabled(struct ath10k *ar) +{ + bool has_radar = false; + + ieee80211_iter_chan_contexts_atomic(ar->hw, + ath10k_mac_has_radar_iter, + &has_radar); + + return has_radar; +} + +static void ath10k_recalc_radar_detection(struct ath10k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ath10k_stop_cac(ar); + + if (!ath10k_mac_has_radar_enabled(ar)) + return; + + if (ar->num_started_vdevs > 0) + return; + + ret = ath10k_start_cac(ar); + if (ret) { + /* + * Not possible to start CAC on current channel so starting + * radiation is not allowed, make this channel DFS_UNAVAILABLE + * by indicating that radar was detected. 
+ */ + ath10k_warn(ar, "failed to start CAC: %d\n", ret); + ieee80211_radar_detected(ar->hw, NULL); + } +} + +static int ath10k_vdev_stop(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->vdev_setup_done); + reinit_completion(&ar->vdev_delete_done); + + ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); + if (ret) { + ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + ret = ath10k_vdev_setup_sync(ar); + if (ret) { + ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + WARN_ON(ar->num_started_vdevs == 0); + + if (ar->num_started_vdevs != 0) { + ar->num_started_vdevs--; + ath10k_recalc_radar_detection(ar); + } + return ret; } -static int ath10k_monitor_destroy(struct ath10k *ar) +static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, + const struct cfg80211_chan_def *chandef, + bool restart) { + struct ath10k *ar = arvif->ar; + struct wmi_vdev_start_request_arg arg = {}; int ret = 0; lockdep_assert_held(&ar->conf_mutex); - if (!ar->monitor_present) - return 0; + reinit_completion(&ar->vdev_setup_done); + reinit_completion(&ar->vdev_delete_done); + + arg.vdev_id = arvif->vdev_id; + arg.dtim_period = arvif->dtim_period; + arg.bcn_intval = arvif->beacon_interval; + + arg.channel.freq = chandef->chan->center_freq; + arg.channel.band_center_freq1 = chandef->center_freq1; + arg.channel.band_center_freq2 = chandef->center_freq2; + arg.channel.mode = chan_to_phymode(chandef); + + arg.channel.min_power = 0; + arg.channel.max_power = chandef->chan->max_power * 2; + arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; + arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain; + + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { + arg.ssid = arvif->u.ap.ssid; + arg.ssid_len = arvif->u.ap.ssid_len; + arg.hidden_ssid = arvif->u.ap.hidden_ssid; + + /* For now allow DFS for AP mode */ + arg.channel.chan_radar = + !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); + } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { + arg.ssid = arvif->vif->cfg.ssid; + arg.ssid_len = arvif->vif->cfg.ssid_len; + } + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d start center_freq %d phymode %s\n", + arg.vdev_id, arg.channel.freq, + ath10k_wmi_phymode_str(arg.channel.mode)); + + if (restart) + ret = ath10k_wmi_vdev_restart(ar, &arg); + else + ret = ath10k_wmi_vdev_start(ar, &arg); - ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); if (ret) { - ath10k_warn("WMI vdev monitor delete failed: %d\n", ret); + ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", + arg.vdev_id, ret); return ret; } - ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); - ar->monitor_present = false; + ret = ath10k_vdev_setup_sync(ar); + if (ret) { + ath10k_warn(ar, + "failed to synchronize setup for vdev %i restart %d: %d\n", + arg.vdev_id, restart, ret); + return ret; + } + + ar->num_started_vdevs++; + ath10k_recalc_radar_detection(ar); - ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n", - ar->monitor_vdev_id); return ret; } +static int ath10k_vdev_start(struct ath10k_vif *arvif, + const struct cfg80211_chan_def *def) +{ + return ath10k_vdev_start_restart(arvif, def, false); +} + +static int ath10k_vdev_restart(struct ath10k_vif *arvif, + const struct cfg80211_chan_def *def) +{ + return ath10k_vdev_start_restart(arvif, def, true); +} + +static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, + struct sk_buff 
*bcn) +{ + struct ath10k *ar = arvif->ar; + struct ieee80211_mgmt *mgmt; + const u8 *p2p_ie; + int ret; + + if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) + return 0; + + mgmt = (void *)bcn->data; + p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, + mgmt->u.beacon.variable, + bcn->len - (mgmt->u.beacon.variable - + bcn->data)); + if (!p2p_ie) + return -ENOENT; + + ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); + if (ret) { + ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, + u8 oui_type, size_t ie_offset) +{ + size_t len; + const u8 *next; + const u8 *end; + u8 *ie; + + if (WARN_ON(skb->len < ie_offset)) + return -EINVAL; + + ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, + skb->data + ie_offset, + skb->len - ie_offset); + if (!ie) + return -ENOENT; + + len = ie[1] + 2; + end = skb->data + skb->len; + next = ie + len; + + if (WARN_ON(next > end)) + return -EINVAL; + + memmove(ie, next, end - next); + skb_trim(skb, skb->len - len); + + return 0; +} + +static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_mutable_offsets offs = {}; + struct sk_buff *bcn; + int ret; + + if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) + return 0; + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_IBSS) + return 0; + + bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0); + if (!bcn) { + ath10k_warn(ar, "failed to get beacon template from mac80211\n"); + return -EPERM; + } + + ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); + if (ret) { + ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); + kfree_skb(bcn); + return ret; + } + + /* P2P IE is inserted by firmware automatically (as configured above) + * so remove it from the base beacon template to avoid duplicate P2P + * IEs in beacon frames. 
+ */ + ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, + offsetof(struct ieee80211_mgmt, + u.beacon.variable)); + + ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, + 0, NULL, 0); + kfree_skb(bcn); + + if (ret) { + ath10k_warn(ar, "failed to submit beacon template command: %d\n", + ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_vif *vif = arvif->vif; + struct sk_buff *prb; + int ret; + + if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) + return 0; + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP) + return 0; + + /* For mesh, probe response and beacon share the same template */ + if (ieee80211_vif_is_mesh(vif)) + return 0; + + prb = ieee80211_proberesp_get(hw, vif); + if (!prb) { + ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); + return -EPERM; + } + + ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); + kfree_skb(prb); + + if (ret) { + ath10k_warn(ar, "failed to submit probe resp template command: %d\n", + ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct cfg80211_chan_def def; + int ret; + + /* When originally vdev is started during assign_vif_chanctx() some + * information is missing, notably SSID. Firmware revisions with beacon + * offloading require the SSID to be provided during vdev (re)start to + * handle hidden SSID properly. + * + * Vdev restart must be done after vdev has been both started and + * upped. Otherwise some firmware revisions (at least 10.2) fail to + * deliver vdev restart response event causing timeouts during vdev + * syncing in ath10k. + * + * Note: The vdev down/up and template reinstallation could be skipped + * since only wmi-tlv firmware are known to have beacon offload and + * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart + * response delivery. It's probably more robust to keep it as is. + */ + if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) + return 0; + + if (WARN_ON(!arvif->is_started)) + return -EINVAL; + + if (WARN_ON(!arvif->is_up)) + return -EINVAL; + + if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) + return -EINVAL; + + ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) { + ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + /* Vdev down reset beacon & presp templates. Reinstall them. Otherwise + * firmware will crash upon vdev up. 
+ */ + + ret = ath10k_mac_setup_bcn_tmpl(arvif); + if (ret) { + ath10k_warn(ar, "failed to update beacon template: %d\n", ret); + return ret; + } + + ret = ath10k_mac_setup_prb_tmpl(arvif); + if (ret) { + ath10k_warn(ar, "failed to update presp template: %d\n", ret); + return ret; + } + + ret = ath10k_vdev_restart(arvif, &def); + if (ret) { + ath10k_warn(ar, "failed to restart ap vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, + arvif->bssid); + if (ret) { + ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} + static void ath10k_control_beaconing(struct ath10k_vif *arvif, - struct ieee80211_bss_conf *info) + struct ieee80211_bss_conf *info) { + struct ath10k *ar = arvif->ar; int ret = 0; + lockdep_assert_held(&arvif->ar->conf_mutex); + if (!info->enable_beacon) { - ath10k_vdev_stop(arvif); + ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) + ath10k_warn(ar, "failed to down vdev_id %i: %d\n", + arvif->vdev_id, ret); + + arvif->is_up = false; + + spin_lock_bh(&arvif->ar->data_lock); + ath10k_mac_vif_beacon_free(arvif); + spin_unlock_bh(&arvif->ar->data_lock); + return; } arvif->tx_seq_no = 0x1000; - ret = ath10k_vdev_start(arvif); - if (ret) + arvif->aid = 0; + ether_addr_copy(arvif->bssid, info->bssid); + + ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, + arvif->bssid); + if (ret) { + ath10k_warn(ar, "failed to bring up vdev %d: %i\n", + arvif->vdev_id, ret); return; + } + + arvif->is_up = true; - ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid); + ret = ath10k_mac_vif_fix_hidden_ssid(arvif); if (ret) { - ath10k_warn("Failed to bring up VDEV: %d\n", - arvif->vdev_id); + ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n", + arvif->vdev_id, ret); return; } - ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id); + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); } static void ath10k_control_ibss(struct ath10k_vif *arvif, - struct ieee80211_bss_conf *info, - const u8 self_peer[ETH_ALEN]) + struct ieee80211_vif *vif) { + struct ath10k *ar = arvif->ar; + u32 vdev_param; int ret = 0; - if (!info->ibss_joined) { - ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer); - if (ret) - ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n", - self_peer, arvif->vdev_id, ret); - - if (is_zero_ether_addr(arvif->u.ibss.bssid)) - return; + lockdep_assert_held(&arvif->ar->conf_mutex); - ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, - arvif->u.ibss.bssid); - if (ret) { - ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n", - arvif->u.ibss.bssid, arvif->vdev_id, ret); + if (!vif->cfg.ibss_joined) { + if (is_zero_ether_addr(arvif->bssid)) return; - } - - memset(arvif->u.ibss.bssid, 0, ETH_ALEN); - return; - } + eth_zero_addr(arvif->bssid); - ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer); - if (ret) { - ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n", - self_peer, arvif->vdev_id, ret); return; } - ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, - WMI_VDEV_PARAM_ATIM_WINDOW, + vdev_param = arvif->ar->wmi.vdev_param->atim_window; + ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, ATH10K_DEFAULT_ATIM); if (ret) - ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n", + ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", arvif->vdev_id, ret); } 
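[Editor's note] The ath10k_mac_vif_fix_hidden_ssid() / ath10k_control_beaconing() hunks above boil down to a strict ordering of vdev state transitions: down -> reinstall beacon/probe-response templates (vdev down resets them in firmware) -> restart with the now-known SSID -> up. The standalone C sketch below only illustrates that ordering and the driver's early-return error style; it is not kernel code, and the wmi_* stubs, the plain int vdev_id parameter and the printf() placeholders are hypothetical stand-ins for the real WMI calls.

#include <stdio.h>

/* Hypothetical stand-ins for the WMI vdev operations; 0 means success. */
static int wmi_vdev_down(int vdev_id)    { printf("vdev %d down\n", vdev_id); return 0; }
static int setup_bcn_tmpl(int vdev_id)   { printf("vdev %d bcn tmpl\n", vdev_id); return 0; }
static int setup_prb_tmpl(int vdev_id)   { printf("vdev %d prb tmpl\n", vdev_id); return 0; }
static int wmi_vdev_restart(int vdev_id) { printf("vdev %d restart\n", vdev_id); return 0; }
static int wmi_vdev_up(int vdev_id)      { printf("vdev %d up\n", vdev_id); return 0; }

/* Same ordering as the hidden-SSID workaround: any failure aborts the
 * sequence and the error is propagated to the caller, mirroring the
 * "ret = op(); if (ret) return ret;" ladder used throughout mac.c.
 */
static int fix_hidden_ssid(int vdev_id)
{
	int ret;

	ret = wmi_vdev_down(vdev_id);	/* templates get reset here */
	if (ret)
		return ret;

	ret = setup_bcn_tmpl(vdev_id);	/* must be reinstalled before up, */
	if (ret)
		return ret;

	ret = setup_prb_tmpl(vdev_id);	/* otherwise firmware may crash */
	if (ret)
		return ret;

	ret = wmi_vdev_restart(vdev_id);	/* restart carries the real SSID */
	if (ret)
		return ret;

	return wmi_vdev_up(vdev_id);
}

int main(void)
{
	return fix_hidden_ssid(0);
}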
-/* - * Review this when mac80211 gains per-interface powersave support. - */ -static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 param; + u32 value; + int ret; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (arvif->u.sta.uapsd) + value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER; + else + value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; + + param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); + if (ret) { + ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", + value, arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 param; + u32 value; + int ret; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (arvif->u.sta.uapsd) + value = WMI_STA_PS_PSPOLL_COUNT_UAPSD; + else + value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; + + param = WMI_STA_PS_PARAM_PSPOLL_COUNT; + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + param, value); + if (ret) { + ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", + value, arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_num_vifs_started(struct ath10k *ar) { - struct ath10k_generic_iter *ar_iter = data; - struct ieee80211_conf *conf = &ar_iter->ar->hw->conf; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif; + int num = 0; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) + if (arvif->is_started) + num++; + + return num; +} + +static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_conf *conf = &ar->hw->conf; enum wmi_sta_powersave_param param; enum wmi_sta_ps_mode psmode; int ret; + int ps_timeout; + bool enable_ps; - if (vif->type != NL80211_IFTYPE_STATION) - return; + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (arvif->vif->type != NL80211_IFTYPE_STATION) + return 0; - if (conf->flags & IEEE80211_CONF_PS) { + enable_ps = arvif->ps; + + if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && + !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, + ar->running_fw->fw_file.fw_features)) { + ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", + arvif->vdev_id); + enable_ps = false; + } + + if (!arvif->is_started) { + /* mac80211 can update vif powersave state while disconnected. + * Firmware doesn't behave nicely and consumes more power than + * necessary if PS is disabled on a non-started vdev. Hence + * force-enable PS for non-running vdevs. 
+ */ + psmode = WMI_STA_PS_MODE_ENABLED; + } else if (enable_ps) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; - ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar, - arvif->vdev_id, - param, - conf->dynamic_ps_timeout); - if (ret) { - ath10k_warn("Failed to set inactivity time for VDEV: %d\n", - arvif->vdev_id); - return; + ps_timeout = conf->dynamic_ps_timeout; + if (ps_timeout == 0) { + /* Firmware doesn't like 0 */ + ps_timeout = ieee80211_tu_to_usec( + vif->bss_conf.beacon_int) / 1000; } - ar_iter->ret = ret; + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, + ps_timeout); + if (ret) { + ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", + arvif->vdev_id, ret); + return ret; + } } else { psmode = WMI_STA_PS_MODE_DISABLED; } - ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id, - psmode); - if (ar_iter->ret) - ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n", - psmode, arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n", - psmode, arvif->vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", + arvif->vdev_id, psmode ? "enable" : "disable"); + + ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); + if (ret) { + ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", + psmode, arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct wmi_sta_keepalive_arg arg = {}; + int ret; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + return 0; + + if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) + return 0; + + /* Some firmware revisions have a bug and ignore the `enabled` field. + * Instead use the interval to disable the keepalive. 
+ */ + arg.vdev_id = arvif->vdev_id; + arg.enabled = 1; + arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; + arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; + + ret = ath10k_wmi_sta_keepalive(ar, &arg); + if (ret) { + ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct ieee80211_vif *vif = arvif->vif; + int ret; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))) + return; + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP) + return; + + if (!vif->bss_conf.csa_active) + return; + + if (!arvif->is_up) + return; + + if (!ieee80211_beacon_cntdwn_is_complete(vif, 0)) { + ieee80211_beacon_update_cntdwn(vif, 0); + + ret = ath10k_mac_setup_bcn_tmpl(arvif); + if (ret) + ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", + ret); + + ret = ath10k_mac_setup_prb_tmpl(arvif); + if (ret) + ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", + ret); + } else { + ieee80211_csa_finish(vif, 0); + } +} + +static void ath10k_mac_vif_ap_csa_work(struct work_struct *work) +{ + struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, + ap_csa_work); + struct ath10k *ar = arvif->ar; + + mutex_lock(&ar->conf_mutex); + ath10k_mac_vif_ap_csa_count_down(arvif); + mutex_unlock(&ar->conf_mutex); +} + +static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct sk_buff *skb = data; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) + return; + + cancel_delayed_work(&arvif->connection_loss_work); +} + +void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb) +{ + ieee80211_iterate_active_interfaces_atomic(ar->hw, + ATH10K_ITER_NORMAL_FLAGS, + ath10k_mac_handle_beacon_iter, + skb); +} + +static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + u32 *vdev_id = data; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k *ar = arvif->ar; + struct ieee80211_hw *hw = ar->hw; + + if (arvif->vdev_id != *vdev_id) + return; + + if (!arvif->is_up) + return; + + ieee80211_beacon_loss(vif); + + /* Firmware doesn't report beacon loss events repeatedly. If AP probe + * (done by mac80211) succeeds but beacons do not resume then it + * doesn't make sense to continue operation. Queue connection loss work + * which can be cancelled when beacon is received. 
+ */ + ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, + ATH10K_CONNECTION_LOSS_HZ); +} + +void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id) +{ + ieee80211_iterate_active_interfaces_atomic(ar->hw, + ATH10K_ITER_NORMAL_FLAGS, + ath10k_mac_handle_beacon_miss_iter, + &vdev_id); +} + +static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work) +{ + struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, + connection_loss_work.work); + struct ieee80211_vif *vif = arvif->vif; + + if (!arvif->is_up) + return; + + ieee80211_connection_loss(vif); } /**********************/ /* Station management */ /**********************/ +static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, + struct ieee80211_vif *vif) +{ + /* Some firmware revisions have unstable STA powersave when listen + * interval is set too high (e.g. 5). The symptoms are firmware doesn't + * generate NullFunc frames properly even if buffered frames have been + * indicated in Beacon TIM. Firmware would seldom wake up to pull + * buffered frames. Often pinging the device from AP would simply fail. + * + * As a workaround set it to 1. + */ + if (vif->type == NL80211_IFTYPE_STATION) + return 1; + + return ar->hw->conf.listen_interval; +} + static void ath10k_peer_assoc_h_basic(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { - memcpy(arg->addr, sta->addr, ETH_ALEN); - arg->vdev_id = arvif->vdev_id; - arg->peer_aid = sta->aid; - arg->peer_flags |= WMI_PEER_AUTH; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + u32 aid; - if (arvif->vdev_type == WMI_VDEV_TYPE_STA) - /* - * Seems FW have problems with Power Save in STA - * mode when we setup this parameter to high (eg. 5). - * Often we see that FW don't send NULL (with clean P flags) - * frame even there is info about buffered frames in beacons. - * Sometimes we have to wait more than 10 seconds before FW - * will wakeup. Often sending one ping from AP to our device - * just fail (more than 50%). - * - * Seems setting this FW parameter to 1 couse FW - * will check every beacon and will wakup immediately - * after detection buffered data. - */ - arg->peer_listen_intval = 1; + lockdep_assert_held(&ar->conf_mutex); + + if (vif->type == NL80211_IFTYPE_STATION) + aid = vif->cfg.aid; else - arg->peer_listen_intval = ar->hw->conf.listen_interval; + aid = sta->aid; + ether_addr_copy(arg->addr, sta->addr); + arg->vdev_id = arvif->vdev_id; + arg->peer_aid = aid; + arg->peer_flags |= arvif->ar->wmi.peer_flags->auth; + arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); arg->peer_num_spatial_streams = 1; - - /* - * The assoc capabilities are available only in managed mode. 
- */ - if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf) - arg->peer_caps = bss_conf->assoc_capability; + arg->peer_caps = vif->bss_conf.assoc_capability; } static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - struct ieee80211_vif *vif = arvif->vif; struct ieee80211_bss_conf *info = &vif->bss_conf; + struct cfg80211_chan_def def; struct cfg80211_bss *bss; const u8 *rsnie = NULL; const u8 *wpaie = NULL; - bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan, - info->bssid, NULL, 0, 0, 0); + lockdep_assert_held(&ar->conf_mutex); + + if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) + return; + + bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, + vif->cfg.ssid_len ? vif->cfg.ssid : NULL, + vif->cfg.ssid_len, + IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); if (bss) { const struct cfg80211_bss_ies *ies; @@ -775,37 +2235,55 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, ies = rcu_dereference(bss->ies); wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, - WLAN_OUI_TYPE_MICROSOFT_WPA, - ies->data, - ies->len); + WLAN_OUI_TYPE_MICROSOFT_WPA, + ies->data, + ies->len); rcu_read_unlock(); cfg80211_put_bss(ar->hw->wiphy, bss); } /* FIXME: base on RSN IE/WPA IE is a correct idea? */ if (rsnie || wpaie) { - ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); - arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; + ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); + arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; } if (wpaie) { - ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); - arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; + ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); + arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; + } + + if (sta->mfp && + test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, + ar->running_fw->fw_file.fw_features)) { + arg->peer_flags |= ar->wmi.peer_flags->pmf; } } static void ath10k_peer_assoc_h_rates(struct ath10k *ar, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; + struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; const struct ieee80211_rate *rates; + enum nl80211_band band; u32 ratemask; + u8 rate; int i; - sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band]; - ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band]; + lockdep_assert_held(&ar->conf_mutex); + + if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) + return; + + band = def.chan->band; + sband = ar->hw->wiphy->bands[band]; + ratemask = sta->deflink.supp_rates[band]; + ratemask &= arvif->bitrate_mask.control[band].legacy; rates = sband->bitrates; rateset->num_rates = 0; @@ -814,23 +2292,68 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar, if (!(ratemask & 1)) continue; - rateset->rates[rateset->num_rates] = rates->hw_value; + rate = ath10k_mac_bitrate_to_rate(rates->bitrate); + rateset->rates[rateset->num_rates] = rate; rateset->num_rates++; } } +static bool +ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) +{ + int nss; + + for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) + if (ht_mcs_mask[nss]) + return false; + + return true; +} + +static bool +ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) +{ + int nss; + + for (nss = 0; nss < 
NL80211_VHT_NSS_MAX; nss++) + if (vht_mcs_mask[nss]) + return false; + + return true; +} + static void ath10k_peer_assoc_h_ht(struct ath10k *ar, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - int smps; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + enum nl80211_band band; + const u8 *ht_mcs_mask; + const u16 *vht_mcs_mask; int i, n; + u8 max_nss; + u32 stbc; + + lockdep_assert_held(&ar->conf_mutex); + + if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) + return; if (!ht_cap->ht_supported) return; - arg->peer_flags |= WMI_PEER_HT; + band = def.chan->band; + ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; + vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + + if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && + ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) + return; + + arg->peer_flags |= ar->wmi.peer_flags->ht; arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + ht_cap->ampdu_factor)) - 1; @@ -841,42 +2364,32 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, arg->peer_rate_caps |= WMI_RC_HT_FLAG; if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) - arg->peer_flags |= WMI_PEER_LDPC; + arg->peer_flags |= ar->wmi.peer_flags->ldbc; - if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { - arg->peer_flags |= WMI_PEER_40MHZ; + if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) { + arg->peer_flags |= ar->wmi.peer_flags->bw40; arg->peer_rate_caps |= WMI_RC_CW40_FLAG; } - if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) - arg->peer_rate_caps |= WMI_RC_SGI_FLAG; + if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { + if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) + arg->peer_rate_caps |= WMI_RC_SGI_FLAG; - if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) - arg->peer_rate_caps |= WMI_RC_SGI_FLAG; + if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) + arg->peer_rate_caps |= WMI_RC_SGI_FLAG; + } if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; - arg->peer_flags |= WMI_PEER_STBC; + arg->peer_flags |= ar->wmi.peer_flags->stbc; } if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { - u32 stbc; stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; stbc = stbc << WMI_RC_RX_STBC_FLAG_S; arg->peer_rate_caps |= stbc; - arg->peer_flags |= WMI_PEER_STBC; - } - - smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; - smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; - - if (smps == WLAN_HT_CAP_SM_PS_STATIC) { - arg->peer_flags |= WMI_PEER_SPATIAL_MUX; - arg->peer_flags |= WMI_PEER_STATIC_MIMOPS; - } else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) { - arg->peer_flags |= WMI_PEER_SPATIAL_MUX; - arg->peer_flags |= WMI_PEER_DYN_MIMOPS; + arg->peer_flags |= ar->wmi.peer_flags->stbc; } if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) @@ -884,37 +2397,52 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, else if (ht_cap->mcs.rx_mask[1]) arg->peer_rate_caps |= WMI_RC_DS_FLAG; - for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++) - if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8)) + for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) + if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && + (ht_mcs_mask[i / 8] & BIT(i % 8))) { + max_nss = (i / 8) + 1; arg->peer_ht_rates.rates[n++] = i; + } - arg->peer_ht_rates.num_rates = n; - arg->peer_num_spatial_streams = max((n+7) / 8, 1); + /* + * This is a workaround for 
HT-enabled STAs which break the spec + * and have no HT capabilities RX mask (no HT RX MCS map). + * + * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), + * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. + * + * Firmware asserts if such situation occurs. + */ + if (n == 0) { + arg->peer_ht_rates.num_rates = 8; + for (i = 0; i < arg->peer_ht_rates.num_rates; i++) + arg->peer_ht_rates.rates[i] = i; + } else { + arg->peer_ht_rates.num_rates = n; + arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, + max_nss); + } - ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", + arg->addr, arg->peer_ht_rates.num_rates, arg->peer_num_spatial_streams); } -static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar, - struct ath10k_vif *arvif, - struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, - struct wmi_peer_assoc_complete_arg *arg) +static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, + struct ath10k_vif *arvif, + struct ieee80211_sta *sta) { u32 uapsd = 0; u32 max_sp = 0; + int ret = 0; - if (sta->wme) - arg->peer_flags |= WMI_PEER_QOS; + lockdep_assert_held(&ar->conf_mutex); if (sta->wme && sta->uapsd_queues) { - ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", sta->uapsd_queues, sta->max_sp); - arg->peer_flags |= WMI_PEER_APSD; - arg->peer_flags |= WMI_RC_UAPSD_FLAG; - if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; @@ -928,111 +2456,349 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar, uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; - if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) max_sp = sta->max_sp; - ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, - sta->addr, - WMI_AP_PS_PEER_PARAM_UAPSD, - uapsd); + ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, + sta->addr, + WMI_AP_PS_PEER_PARAM_UAPSD, + uapsd); + if (ret) { + ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } - ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, - sta->addr, - WMI_AP_PS_PEER_PARAM_MAX_SP, - max_sp); + ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, + sta->addr, + WMI_AP_PS_PEER_PARAM_MAX_SP, + max_sp); + if (ret) { + ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } /* TODO setup this based on STA listen interval and - beacon interval. Currently we don't know - sta->listen_interval - mac80211 patch required. - Currently use 10 seconds */ - ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, - sta->addr, - WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, - 10); + * beacon interval. Currently we don't know + * sta->listen_interval - mac80211 patch required. 
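+ * (The ageout presumably bounds how long firmware keeps frames
+ * buffered for a dozing station before dropping them.)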
+ * Currently use 10 seconds + */ + ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, + WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, + 10); + if (ret) { + ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } } + + return 0; } -static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar, - struct ath10k_vif *arvif, - struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, - struct wmi_peer_assoc_complete_arg *arg) +static u16 +ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, + const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) { - if (bss_conf->qos) - arg->peer_flags |= WMI_PEER_QOS; + int idx_limit; + int nss; + u16 mcs_map; + u16 mcs; + + for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { + mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & + vht_mcs_limit[nss]; + + if (mcs_map) + idx_limit = fls(mcs_map) - 1; + else + idx_limit = -1; + + switch (idx_limit) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + default: + /* see ath10k_mac_can_set_bitrate_mask() */ + WARN_ON(1); + fallthrough; + case -1: + mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; + break; + case 7: + mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; + break; + case 8: + mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; + break; + case 9: + mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; + break; + } + + tx_mcs_set &= ~(0x3 << (nss * 2)); + tx_mcs_set |= mcs << (nss * 2); + } + + return tx_mcs_set; +} + +static u32 get_160mhz_nss_from_maxrate(int rate) +{ + u32 nss; + + switch (rate) { + case 780: + nss = 1; + break; + case 1560: + nss = 2; + break; + case 2106: + nss = 3; /* not support MCS9 from spec*/ + break; + case 3120: + nss = 4; + break; + default: + nss = 1; + } + + return nss; } static void ath10k_peer_assoc_h_vht(struct ath10k *ar, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_hw_params *hw = &ar->hw_params; + struct cfg80211_chan_def def; + enum nl80211_band band; + const u16 *vht_mcs_mask; + u8 ampdu_factor; + u8 max_nss, vht_mcs; + int i; + + if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) + return; if (!vht_cap->vht_supported) return; - arg->peer_flags |= WMI_PEER_VHT; + band = def.chan->band; + vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + + if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) + return; + + arg->peer_flags |= ar->wmi.peer_flags->vht; + + if (def.chan->band == NL80211_BAND_2GHZ) + arg->peer_flags |= ar->wmi.peer_flags->vht_2g; arg->peer_vht_caps = vht_cap->cap; - if (sta->bandwidth == IEEE80211_STA_RX_BW_80) - arg->peer_flags |= WMI_PEER_80MHZ; + ampdu_factor = (vht_cap->cap & + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to + * zero in VHT IE. Using it would result in degraded throughput. + * arg->peer_max_mpdu at this point contains HT max_mpdu so keep + * it if VHT max_mpdu is smaller. 
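+ *
+ * Worked example (illustrative numbers only): with
+ * IEEE80211_HT_MAX_AMPDU_FACTOR == 13, an HT ampdu_factor of 2 yields
+ * (1 << 15) - 1 = 32767 while a bogus VHT exponent of 0 yields only
+ * (1 << 13) - 1 = 8191, so the max() below keeps the larger HT value.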
+ */ + arg->peer_max_mpdu = max(arg->peer_max_mpdu, + (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + + ampdu_factor)) - 1); + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + arg->peer_flags |= ar->wmi.peer_flags->bw80; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) + arg->peer_flags |= ar->wmi.peer_flags->bw160; + + /* Calculate peer NSS capability from VHT capabilities if STA + * supports VHT. + */ + for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) { + vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> + (2 * i) & 3; + + if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) && + vht_mcs_mask[i]) + max_nss = i + 1; + } + arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, max_nss); arg->peer_vht_rates.rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->peer_vht_rates.rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); arg->peer_vht_rates.tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); - arg->peer_vht_rates.tx_mcs_set = - __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); + arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit( + __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); + + /* Configure bandwidth-NSS mapping to FW + * for the chip's tx chains setting on 160Mhz bw + */ + if (arg->peer_phymode == MODE_11AC_VHT160 || + arg->peer_phymode == MODE_11AC_VHT80_80) { + u32 rx_nss; + u32 max_rate; + + max_rate = arg->peer_vht_rates.rx_max_rate; + rx_nss = get_160mhz_nss_from_maxrate(max_rate); + + if (rx_nss == 0) + rx_nss = arg->peer_num_spatial_streams; + else + rx_nss = min(arg->peer_num_spatial_streams, rx_nss); + + max_rate = hw->vht160_mcs_tx_highest; + rx_nss = min(rx_nss, get_160mhz_nss_from_maxrate(max_rate)); + + arg->peer_bw_rxnss_override = + FIELD_PREP(WMI_PEER_NSS_MAP_ENABLE, 1) | + FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, (rx_nss - 1)); - ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n"); + if (arg->peer_phymode == MODE_11AC_VHT80_80) { + arg->peer_bw_rxnss_override |= + FIELD_PREP(WMI_PEER_NSS_80_80MHZ_MASK, (rx_nss - 1)); + } + } + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vht peer %pM max_mpdu %d flags 0x%x peer_rx_nss_override 0x%x\n", + sta->addr, arg->peer_max_mpdu, + arg->peer_flags, arg->peer_bw_rxnss_override); } static void ath10k_peer_assoc_h_qos(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { + struct ath10k_vif *arvif = (void *)vif->drv_priv; + switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: - ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg); + if (sta->wme) + arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; + + if (sta->wme && sta->uapsd_queues) { + arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd; + arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; + } break; case WMI_VDEV_TYPE_STA: - ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg); + if (sta->wme) + arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; + break; + case WMI_VDEV_TYPE_IBSS: + if (sta->wme) + arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; break; default: break; } + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", + sta->addr, !!(arg->peer_flags & + arvif->ar->wmi.peer_flags->qos)); +} + +static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) +{ + return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >> + ATH10K_MAC_FIRST_OFDM_RATE_IDX; +} + +static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, + struct ieee80211_sta *sta) +{ + struct 
ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { + switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: + return MODE_11AC_VHT160; + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: + return MODE_11AC_VHT80_80; + default: + /* not sure if this is a valid case? */ + return MODE_11AC_VHT160; + } + } + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + return MODE_11AC_VHT80; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + return MODE_11AC_VHT40; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + return MODE_11AC_VHT20; + + return MODE_UNKNOWN; } static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + enum nl80211_band band; + const u8 *ht_mcs_mask; + const u16 *vht_mcs_mask; enum wmi_phy_mode phymode = MODE_UNKNOWN; - /* FIXME: add VHT */ + if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) + return; + + band = def.chan->band; + ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; + vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; - switch (ar->hw->conf.chandef.chan->band) { - case IEEE80211_BAND_2GHZ: - if (sta->ht_cap.ht_supported) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + switch (band) { + case NL80211_BAND_2GHZ: + if (sta->deflink.vht_cap.vht_supported && + !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + phymode = MODE_11AC_VHT40; + else + phymode = MODE_11AC_VHT20; + } else if (sta->deflink.ht_cap.ht_supported && + !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11NG_HT40; else phymode = MODE_11NG_HT20; - } else { + } else if (ath10k_mac_sta_has_ofdm_only(sta)) { phymode = MODE_11G; + } else { + phymode = MODE_11B; } break; - case IEEE80211_BAND_5GHZ: - if (sta->ht_cap.ht_supported) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + case NL80211_BAND_5GHZ: + /* + * Check VHT first. 
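+ *
+ * (Selection sketch: on 5 GHz the precedence is VHT, then HT, then
+ * presumably plain 11a when neither is usable; the user's bitrate
+ * mask can veto VHT/HT via the *_masked() helpers above.)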
+ */ + if (sta->deflink.vht_cap.vht_supported && + !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { + phymode = ath10k_mac_get_phymode_vht(ar, sta); + } else if (sta->deflink.ht_cap.ht_supported && + !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { + if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) phymode = MODE_11NA_HT40; else phymode = MODE_11NA_HT20; @@ -1045,28 +2811,275 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, break; } + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", + sta->addr, ath10k_wmi_phymode_str(phymode)); + arg->peer_phymode = phymode; WARN_ON(phymode == MODE_UNKNOWN); } -static int ath10k_peer_assoc(struct ath10k *ar, - struct ath10k_vif *arvif, - struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf) +static int ath10k_peer_assoc_prepare(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct wmi_peer_assoc_complete_arg *arg) +{ + lockdep_assert_held(&ar->conf_mutex); + + memset(arg, 0, sizeof(*arg)); + + ath10k_peer_assoc_h_basic(ar, vif, sta, arg); + ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); + ath10k_peer_assoc_h_rates(ar, vif, sta, arg); + ath10k_peer_assoc_h_ht(ar, vif, sta, arg); + ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); + ath10k_peer_assoc_h_vht(ar, vif, sta, arg); + ath10k_peer_assoc_h_qos(ar, vif, sta, arg); + + return 0; +} + +static const u32 ath10k_smps_map[] = { + [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, + [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, + [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, + [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, +}; + +static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, + const u8 *addr, + const struct ieee80211_sta_ht_cap *ht_cap) +{ + int smps; + + if (!ht_cap->ht_supported) + return 0; + + smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; + smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + + if (smps >= ARRAY_SIZE(ath10k_smps_map)) + return -EINVAL; + + return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, + ar->wmi.peer_param->smps_state, + ath10k_smps_map[smps]); +} + +static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta_vht_cap vht_cap) +{ + struct ath10k_vif *arvif = (void *)vif->drv_priv; + int ret; + u32 param; + u32 value; + + if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) + return 0; + + if (!(ar->vht_cap_info & + (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) + return 0; + + param = ar->wmi.vdev_param->txbf; + value = 0; + + if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) + return 0; + + /* The following logic is correct. If a remote STA advertises support + * for being a beamformer then we should enable us being a beamformee. 
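+ *
+ * Mapping sketch of the checks below (peer VHT cap -> our txbf bit):
+ *
+ *   SU_BEAMFORMER_CAPABLE -> WMI_VDEV_PARAM_TXBF_SU_TX_BFEE
+ *   MU_BEAMFORMER_CAPABLE -> WMI_VDEV_PARAM_TXBF_MU_TX_BFEE
+ *   SU_BEAMFORMEE_CAPABLE -> WMI_VDEV_PARAM_TXBF_SU_TX_BFER
+ *   MU_BEAMFORMEE_CAPABLE -> WMI_VDEV_PARAM_TXBF_MU_TX_BFER
+ *
+ * with each MU bit additionally implying the corresponding SU bit.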
+ */ + + if (ar->vht_cap_info & + (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { + if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; + + if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; + } + + if (ar->vht_cap_info & + (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { + if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; + + if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; + } + + if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; + + if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; + + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); + if (ret) { + ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", + value, ret); + return ret; + } + + return 0; +} + +static bool ath10k_mac_is_connected(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA) + return true; + } + + return false; +} + +static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) { - struct wmi_peer_assoc_complete_arg arg; + int ret; + u32 param; + int tx_power_2g, tx_power_5g; + bool connected; + + lockdep_assert_held(&ar->conf_mutex); + + /* ath10k internally uses unit of 0.5 dBm so multiply by 2 */ + tx_power_2g = txpower * 2; + tx_power_5g = txpower * 2; + + connected = ath10k_mac_is_connected(ar); + + if (connected && ar->tx_power_2g_limit) + if (tx_power_2g > ar->tx_power_2g_limit) + tx_power_2g = ar->tx_power_2g_limit; + + if (connected && ar->tx_power_5g_limit) + if (tx_power_5g > ar->tx_power_5g_limit) + tx_power_5g = ar->tx_power_5g_limit; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower 2g: %d, 5g: %d\n", + tx_power_2g, tx_power_5g); + + param = ar->wmi.pdev_param->txpower_limit2g; + ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_2g); + if (ret) { + ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", + tx_power_2g, ret); + return ret; + } + + param = ar->wmi.pdev_param->txpower_limit5g; + ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_5g); + if (ret) { + ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", + tx_power_5g, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_txpower_recalc(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int ret, txpower = -1; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + /* txpower not initialized yet? 
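+ * (it stays INT_MIN until mac80211 configures the vif; e.g. vdevs at
+ * 20 and 17 dBm resolve to min() = 17 dBm, which reaches firmware as
+ * 34 in its 0.5 dBm units)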
*/ + if (arvif->txpower == INT_MIN) + continue; + + if (txpower == -1) + txpower = arvif->txpower; + else + txpower = min(txpower, arvif->txpower); + } + + if (txpower == -1) + return 0; + + ret = ath10k_mac_txpower_setup(ar, txpower); + if (ret) { + ath10k_warn(ar, "failed to setup tx power %d: %d\n", + txpower, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_set_sar_power(struct ath10k *ar) +{ + if (!ar->hw_params.dynamic_sar_support) + return -EOPNOTSUPP; + + if (!ath10k_mac_is_connected(ar)) + return 0; + + /* if connected, then arvif->txpower must be valid */ + return ath10k_mac_txpower_recalc(ar); +} + +static int ath10k_mac_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + const struct cfg80211_sar_sub_specs *sub_specs; + struct ath10k *ar = hw->priv; + u32 i; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (!ar->hw_params.dynamic_sar_support) { + ret = -EOPNOTSUPP; + goto err; + } + + if (!sar || sar->type != NL80211_SAR_TYPE_POWER || + sar->num_sub_specs == 0) { + ret = -EINVAL; + goto err; + } + + sub_specs = sar->sub_specs; + + /* 0dbm is not a practical value for ath10k, so use 0 + * as no SAR limitation on it. + */ + ar->tx_power_2g_limit = 0; + ar->tx_power_5g_limit = 0; + + /* note the power is in 0.25dbm unit, while ath10k uses + * 0.5dbm unit. + */ + for (i = 0; i < sar->num_sub_specs; i++) { + if (sub_specs->freq_range_index == 0) + ar->tx_power_2g_limit = sub_specs->power / 2; + else if (sub_specs->freq_range_index == 1) + ar->tx_power_5g_limit = sub_specs->power / 2; - memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg)); + sub_specs++; + } - ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg); - ath10k_peer_assoc_h_crypto(ar, arvif, &arg); - ath10k_peer_assoc_h_rates(ar, sta, &arg); - ath10k_peer_assoc_h_ht(ar, sta, &arg); - ath10k_peer_assoc_h_vht(ar, sta, &arg); - ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg); - ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg); + ret = ath10k_mac_set_sar_power(ar); + if (ret) { + ath10k_warn(ar, "failed to set sar power: %d", ret); + goto err; + } - return ath10k_wmi_peer_assoc(ar, &arg); +err: + mutex_unlock(&ar->conf_mutex); + return ret; } /* can be called only in mac80211 callbacks due to `key_count` usage */ @@ -1075,106 +3088,294 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_bss_conf *bss_conf) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; + struct wmi_peer_assoc_complete_arg peer_arg; struct ieee80211_sta *ap_sta; int ret; + lockdep_assert_held(&ar->conf_mutex); + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", + arvif->vdev_id, arvif->bssid, arvif->aid); + rcu_read_lock(); ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!ap_sta) { - ath10k_warn("Failed to find station entry for %pM\n", - bss_conf->bssid); + ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", + bss_conf->bssid, arvif->vdev_id); rcu_read_unlock(); return; } - ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf); + /* ap_sta must be accessed only within rcu section which must be left + * before calling ath10k_setup_peer_smps() which might sleep. 
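+ * ht_cap and vht_cap are therefore copied by value below so the rcu
+ * read section can end before the sleeping wmi calls are made.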
+ */ + ht_cap = ap_sta->deflink.ht_cap; + vht_cap = ap_sta->deflink.vht_cap; + + ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); if (ret) { - ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid); + ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", + bss_conf->bssid, arvif->vdev_id, ret); rcu_read_unlock(); return; } rcu_read_unlock(); - ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid, - bss_conf->bssid); + ret = ath10k_wmi_peer_assoc(ar, &peer_arg); + if (ret) { + ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", + bss_conf->bssid, arvif->vdev_id, ret); + return; + } + + ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); + if (ret) { + ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", + arvif->vdev_id, ret); + return; + } + + ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); + if (ret) { + ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", + arvif->vdev_id, bss_conf->bssid, ret); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d up (associated) bssid %pM aid %d\n", + arvif->vdev_id, bss_conf->bssid, vif->cfg.aid); + + WARN_ON(arvif->is_up); + + arvif->aid = vif->cfg.aid; + ether_addr_copy(arvif->bssid, bss_conf->bssid); + + ret = ath10k_wmi_pdev_set_param(ar, + ar->wmi.pdev_param->peer_stats_info_enable, 1); if (ret) - ath10k_warn("VDEV: %d up failed: ret %d\n", + ath10k_warn(ar, "failed to enable peer stats info: %d\n", ret); + + ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); + if (ret) { + ath10k_warn(ar, "failed to set vdev %d up: %d\n", arvif->vdev_id, ret); - else - ath10k_dbg(ATH10K_DBG_MAC, - "VDEV: %d associated, BSSID: %pM, AID: %d\n", - arvif->vdev_id, bss_conf->bssid, bss_conf->aid); + return; + } + + arvif->is_up = true; + + ath10k_mac_set_sar_power(ar); + + /* Workaround: Some firmware revisions (tested with qca6174 + * WLAN.RM.2.0-00073) have buggy powersave state machine and must be + * poked with peer param command. + */ + ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, + ar->wmi.peer_param->dummy_var, 1); + if (ret) { + ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", + arvif->bssid, arvif->vdev_id, ret); + return; + } } -/* - * FIXME: flush TIDs - */ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_sta_vht_cap vht_cap = {}; int ret; - /* - * For some reason, calling VDEV-DOWN before VDEV-STOP - * makes the FW to send frames via HTT after disassociation. - * No idea why this happens, even though VDEV-DOWN is supposed - * to be analogous to link down, so just stop the VDEV. - */ - ret = ath10k_vdev_stop(arvif); - if (!ret) - ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n", - arvif->vdev_id); + lockdep_assert_held(&ar->conf_mutex); + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", + arvif->vdev_id, arvif->bssid); - /* - * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and - * report beacons from previously associated network through HTT. - * This in turn would spam mac80211 WARN_ON if we bring down all - * interfaces as it expects there is no rx when no interface is - * running. 
- */ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); if (ret) - ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n", - arvif->vdev_id, ret); + ath10k_warn(ar, "failed to down vdev %i: %d\n", + arvif->vdev_id, ret); + + arvif->def_wep_key_idx = -1; + + ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); + if (ret) { + ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", + arvif->vdev_id, ret); + return; + } - ath10k_wmi_flush_tx(ar); + arvif->is_up = false; - arvif->def_wep_key_index = 0; + ath10k_mac_txpower_recalc(ar); + + cancel_delayed_work_sync(&arvif->connection_loss_work); } -static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, - struct ieee80211_sta *sta) +static int ath10k_new_peer_tid_config(struct ath10k *ar, + struct ieee80211_sta *sta, + struct ath10k_vif *arvif) { + struct wmi_per_peer_per_tid_cfg_arg arg = {}; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + bool config_apply; + int ret, i; + + for (i = 0; i < ATH10K_TID_MAX; i++) { + config_apply = false; + if (arvif->retry_long[i] || arvif->ampdu[i] || + arvif->rate_ctrl[i] || arvif->rtscts[i]) { + config_apply = true; + arg.tid = i; + arg.vdev_id = arvif->vdev_id; + arg.retry_count = arvif->retry_long[i]; + arg.aggr_control = arvif->ampdu[i]; + arg.rate_ctrl = arvif->rate_ctrl[i]; + arg.rcode_flags = arvif->rate_code[i]; + + if (arvif->rtscts[i]) + arg.ext_tid_cfg_bitmap = + WMI_EXT_TID_RTS_CTS_CONFIG; + else + arg.ext_tid_cfg_bitmap = 0; + + arg.rtscts_ctrl = arvif->rtscts[i]; + } + + if (arvif->noack[i]) { + arg.ack_policy = arvif->noack[i]; + arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE; + arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE; + config_apply = true; + } + + /* Assign default value(-1) to newly connected station. + * This is to identify station specific tid configuration not + * configured for the station. + */ + arsta->retry_long[i] = -1; + arsta->noack[i] = -1; + arsta->ampdu[i] = -1; + + if (!config_apply) + continue; + + ether_addr_copy(arg.peer_macaddr.addr, sta->addr); + + ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg); + if (ret) { + ath10k_warn(ar, "failed to set per tid retry/aggr config for sta %pM: %d\n", + sta->addr, ret); + return ret; + } + + memset(&arg, 0, sizeof(arg)); + } + + return 0; +} + +static int ath10k_station_assoc(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool reassoc) +{ + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct wmi_peer_assoc_complete_arg peer_arg; int ret = 0; - ret = ath10k_peer_assoc(ar, arvif, sta, NULL); + lockdep_assert_held(&ar->conf_mutex); + + ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); if (ret) { - ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr); + ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", + sta->addr, arvif->vdev_id, ret); return ret; } - ret = ath10k_install_peer_wep_keys(arvif, sta->addr); + ret = ath10k_wmi_peer_assoc(ar, &peer_arg); if (ret) { - ath10k_warn("could not install peer wep keys (%d)\n", ret); + ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", + sta->addr, arvif->vdev_id, ret); return ret; } - return ret; + /* Re-assoc is run only to update supported rates for given station. It + * doesn't make much sense to reconfigure the peer completely. 
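+ *
+ * (On reassoc the SMPS setup, AP powersave params, legacy-station
+ * rts/cts accounting and cached WEP key install below are all
+ * skipped; only the peer assoc command and any per-tid config are
+ * redone.)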
+ */ + if (!reassoc) { + ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, + &sta->deflink.ht_cap); + if (ret) { + ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", + arvif->vdev_id, ret); + return ret; + } + + ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); + if (ret) { + ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", + sta->addr, arvif->vdev_id, ret); + return ret; + } + + if (!sta->wme) { + arvif->num_legacy_stations++; + ret = ath10k_recalc_rtscts_prot(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + /* Plumb cached keys only for static WEP */ + if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) { + ret = ath10k_install_peer_wep_keys(arvif, sta->addr); + if (ret) { + ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + } + + if (!test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map)) + return ret; + + return ath10k_new_peer_tid_config(ar, sta, arvif); } -static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, +static int ath10k_station_disassoc(struct ath10k *ar, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) { + struct ath10k_vif *arvif = (void *)vif->drv_priv; int ret = 0; + lockdep_assert_held(&ar->conf_mutex); + + if (!sta->wme) { + arvif->num_legacy_stations--; + ret = ath10k_recalc_rtscts_prot(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + ret = ath10k_clear_peer_keys(arvif, sta->addr); if (ret) { - ath10k_warn("could not clear all peer wep keys (%d)\n", ret); + ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", + arvif->vdev_id, ret); return ret; } @@ -1189,17 +3390,19 @@ static int ath10k_update_channel_list(struct ath10k *ar) { struct ieee80211_hw *hw = ar->hw; struct ieee80211_supported_band **bands; - enum ieee80211_band band; + enum nl80211_band band; struct ieee80211_channel *channel; - struct wmi_scan_chan_list_arg arg = {0}; + struct wmi_scan_chan_list_arg arg = {}; struct wmi_channel_arg *ch; bool passive; int len; int ret; int i; + lockdep_assert_held(&ar->conf_mutex); + bands = hw->wiphy->bands; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!bands[band]) continue; @@ -1218,7 +3421,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) return -ENOMEM; ch = arg.channels; - for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!bands[band]) continue; @@ -1228,31 +3431,43 @@ static int ath10k_update_channel_list(struct ath10k *ar) if (channel->flags & IEEE80211_CHAN_DISABLED) continue; - ch->allow_ht = true; + ch->allow_ht = true; /* FIXME: when should we really allow VHT? 
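+ * (it is currently allowed on every channel, including 2 GHz
+ * where VHT is only a vendor extension)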
*/ ch->allow_vht = true; ch->allow_ibss = - !(channel->flags & IEEE80211_CHAN_NO_IBSS); + !(channel->flags & IEEE80211_CHAN_NO_IR); ch->ht40plus = !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); - passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN; + ch->chan_radar = + !!(channel->flags & IEEE80211_CHAN_RADAR); + + passive = channel->flags & IEEE80211_CHAN_NO_IR; ch->passive = passive; + /* the firmware is ignoring the "radar" flag of the + * channel and is scanning actively using Probe Requests + * on "Radar detection"/DFS channels which are not + * marked as "available" + */ + ch->passive |= ch->chan_radar; + ch->freq = channel->center_freq; - ch->min_power = channel->max_power * 3; - ch->max_power = channel->max_power * 4; - ch->max_reg_power = channel->max_reg_power * 4; + ch->band_center_freq1 = channel->center_freq; + ch->min_power = 0; + ch->max_power = channel->max_power * 2; + ch->max_reg_power = channel->max_reg_power * 2; ch->max_antenna_gain = channel->max_antenna_gain; ch->reg_class_id = 0; /* FIXME */ /* FIXME: why use only legacy modes, why not any * HT/VHT modes? Would that even make any - * difference? */ - if (channel->band == IEEE80211_BAND_2GHZ) + * difference? + */ + if (channel->band == NL80211_BAND_2GHZ) ch->mode = MODE_11G; else ch->mode = MODE_11A; @@ -1260,9 +3475,9 @@ static int ath10k_update_channel_list(struct ath10k *ar) if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) continue; - ath10k_dbg(ATH10K_DBG_WMI, - "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", - __func__, ch - arg.channels, arg.n_channels, + ath10k_dbg(ar, ATH10K_DBG_WMI, + "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", + ch - arg.channels, arg.n_channels, ch->freq, ch->max_power, ch->max_reg_power, ch->max_antenna_gain, ch->mode); @@ -1276,107 +3491,392 @@ static int ath10k_update_channel_list(struct ath10k *ar) return ret; } -static void ath10k_reg_notifier(struct wiphy *wiphy, - struct regulatory_request *request) +static enum wmi_dfs_region +ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) +{ + switch (dfs_region) { + case NL80211_DFS_UNSET: + return WMI_UNINIT_DFS_DOMAIN; + case NL80211_DFS_FCC: + return WMI_FCC_DFS_DOMAIN; + case NL80211_DFS_ETSI: + return WMI_ETSI_DFS_DOMAIN; + case NL80211_DFS_JP: + return WMI_MKK4_DFS_DOMAIN; + } + return WMI_UNINIT_DFS_DOMAIN; +} + +static void ath10k_regd_update(struct ath10k *ar) { - struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct reg_dmn_pair_mapping *regpair; - struct ath10k *ar = hw->priv; int ret; + enum wmi_dfs_region wmi_dfs_reg; + enum nl80211_dfs_regions nl_dfs_reg; - ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); + lockdep_assert_held(&ar->conf_mutex); ret = ath10k_update_channel_list(ar); if (ret) - ath10k_warn("could not update channel list (%d)\n", ret); + ath10k_warn(ar, "failed to update channel list: %d\n", ret); regpair = ar->ath_common.regulatory.regpair; + + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { + nl_dfs_reg = ar->dfs_detector->region; + wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); + } else { + wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; + } + /* Target allows setting up per-band regdomain but ath_common provides - * a combined one only */ + * a combined one only + */ ret = ath10k_wmi_pdev_set_regdomain(ar, - regpair->regDmnEnum, - regpair->regDmnEnum, /* 2ghz */ - regpair->regDmnEnum, /* 5ghz */ + regpair->reg_domain, + regpair->reg_domain, /* 2ghz */ + regpair->reg_domain, /* 5ghz */ 
regpair->reg_2ghz_ctl, - regpair->reg_5ghz_ctl); + regpair->reg_5ghz_ctl, + wmi_dfs_reg); if (ret) - ath10k_warn("could not set pdev regdomain (%d)\n", ret); + ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); +} + +static void ath10k_mac_update_channel_list(struct ath10k *ar, + struct ieee80211_supported_band *band) +{ + int i; + + if (ar->low_5ghz_chan && ar->high_5ghz_chan) { + for (i = 0; i < band->n_channels; i++) { + if (band->channels[i].center_freq < ar->low_5ghz_chan || + band->channels[i].center_freq > ar->high_5ghz_chan) + band->channels[i].flags |= + IEEE80211_CHAN_DISABLED; + } + } +} + +static void ath10k_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct ath10k *ar = hw->priv; + bool result; + + ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); + + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", + request->dfs_region); + result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, + request->dfs_region); + if (!result) + ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", + request->dfs_region); + } + + mutex_lock(&ar->conf_mutex); + if (ar->state == ATH10K_STATE_ON) + ath10k_regd_update(ar); + mutex_unlock(&ar->conf_mutex); + + if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) + ath10k_mac_update_channel_list(ar, + ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); +} + +static void ath10k_stop_radar_confirmation(struct ath10k *ar) +{ + spin_lock_bh(&ar->data_lock); + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED; + spin_unlock_bh(&ar->data_lock); + + cancel_work_sync(&ar->radar_confirmation_work); } /***************/ /* TX handlers */ /***************/ -/* - * Frames sent to the FW have to be in "Native Wifi" format. - * Strip the QoS field from the 802.11 header. 
- */ -static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw, - struct ieee80211_tx_control *control, - struct sk_buff *skb) +enum ath10k_mac_tx_path { + ATH10K_MAC_TX_HTT, + ATH10K_MAC_TX_HTT_MGMT, + ATH10K_MAC_TX_WMI_MGMT, + ATH10K_MAC_TX_UNKNOWN, +}; + +void ath10k_mac_tx_lock(struct ath10k *ar, int reason) { - struct ieee80211_hdr *hdr = (void *)skb->data; - u8 *qos_ctl; + lockdep_assert_held(&ar->htt.tx_lock); - if (!ieee80211_is_data_qos(hdr->frame_control)) + WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); + ar->tx_paused |= BIT(reason); + ieee80211_stop_queues(ar->hw); +} + +static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath10k *ar = data; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + + if (arvif->tx_paused) return; - qos_ctl = ieee80211_get_qos_ctl(hdr); - memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN, - skb->len - ieee80211_hdrlen(hdr->frame_control)); - skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN); + ieee80211_wake_queue(ar->hw, arvif->vdev_id); } -static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) +void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_vif *vif = info->control.vif; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - struct ath10k *ar = arvif->ar; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_key_conf *key = info->control.hw_key; - int ret; + lockdep_assert_held(&ar->htt.tx_lock); - /* TODO AP mode should be implemented */ - if (vif->type != NL80211_IFTYPE_STATION) - return; + WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); + ar->tx_paused &= ~BIT(reason); - if (!ieee80211_has_protected(hdr->frame_control)) + if (ar->tx_paused) return; - if (!key) + ieee80211_iterate_active_interfaces_atomic(ar->hw, + ATH10K_ITER_RESUME_FLAGS, + ath10k_mac_tx_unlock_iter, + ar); + + ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue); +} + +void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->htt.tx_lock); + + WARN_ON(reason >= BITS_PER_LONG); + arvif->tx_paused |= BIT(reason); + ieee80211_stop_queue(ar->hw, arvif->vdev_id); +} + +void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->htt.tx_lock); + + WARN_ON(reason >= BITS_PER_LONG); + arvif->tx_paused &= ~BIT(reason); + + if (ar->tx_paused) return; - if (key->cipher != WLAN_CIPHER_SUITE_WEP40 && - key->cipher != WLAN_CIPHER_SUITE_WEP104) + if (arvif->tx_paused) return; - if (key->keyidx == arvif->def_wep_key_index) + ieee80211_wake_queue(ar->hw, arvif->vdev_id); +} + +static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, + enum wmi_tlv_tx_pause_id pause_id, + enum wmi_tlv_tx_pause_action action) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->htt.tx_lock); + + switch (action) { + case WMI_TLV_TX_PAUSE_ACTION_STOP: + ath10k_mac_vif_tx_lock(arvif, pause_id); + break; + case WMI_TLV_TX_PAUSE_ACTION_WAKE: + ath10k_mac_vif_tx_unlock(arvif, pause_id); + break; + default: + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "received unknown tx pause action %d on vdev %i, ignoring\n", + action, arvif->vdev_id); + break; + } +} + +struct ath10k_mac_tx_pause { + u32 vdev_id; + enum wmi_tlv_tx_pause_id pause_id; + enum wmi_tlv_tx_pause_action action; +}; + +static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct 
ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_mac_tx_pause *arg = data; + + if (arvif->vdev_id != arg->vdev_id) return; - ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx); + ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action); +} - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_DEF_KEYID, - key->keyidx); - if (ret) { - ath10k_warn("could not update wep keyidx (%d)\n", ret); +void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, + enum wmi_tlv_tx_pause_id pause_id, + enum wmi_tlv_tx_pause_action action) +{ + struct ath10k_mac_tx_pause arg = { + .vdev_id = vdev_id, + .pause_id = pause_id, + .action = action, + }; + + spin_lock_bh(&ar->htt.tx_lock); + ieee80211_iterate_active_interfaces_atomic(ar->hw, + ATH10K_ITER_RESUME_FLAGS, + ath10k_mac_handle_tx_pause_iter, + &arg); + spin_unlock_bh(&ar->htt.tx_lock); +} + +static enum ath10k_hw_txrx_mode +ath10k_mac_tx_h_get_txmode(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct sk_buff *skb) +{ + const struct ieee80211_hdr *hdr = (void *)skb->data; + const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); + __le16 fc = hdr->frame_control; + + if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) + return ATH10K_HW_TXRX_ETHERNET; + + if (!vif || vif->type == NL80211_IFTYPE_MONITOR) + return ATH10K_HW_TXRX_RAW; + + if (ieee80211_is_mgmt(fc)) + return ATH10K_HW_TXRX_MGMT; + + /* Workaround: + * + * NullFunc frames are mostly used to ping if a client or AP are still + * reachable and responsive. This implies tx status reports must be + * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can + * come to a conclusion that the other end disappeared and tear down + * BSS connection or it can never disconnect from BSS/client (which is + * the case). + * + * Firmware with HTT older than 3.0 delivers incorrect tx status for + * NullFunc frames to driver. However there's a HTT Mgmt Tx command + * which seems to deliver correct tx reports for NullFunc frames. The + * downside of using it is it ignores client powersave state so it can + * end up disconnecting sleeping clients in AP mode. It should fix STA + * mode though because AP don't sleep. + */ + if (ar->htt.target_version_major < 3 && + (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && + !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, + ar->running_fw->fw_file.fw_features)) + return ATH10K_HW_TXRX_MGMT; + + /* Workaround: + * + * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for + * NativeWifi txmode - it selects AP key instead of peer key. It seems + * to work with Ethernet txmode so use it. + * + * FIXME: Check if raw mode works with TDLS. 
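+ *
+ * Decision sketch for ath10k_mac_tx_h_get_txmode() as a whole:
+ *
+ *   HW_80211_ENCAP flag           -> ETHERNET
+ *   no vif or monitor vif         -> RAW
+ *   management frame              -> MGMT
+ *   (QoS-)NullFunc on HTT < 3.0   -> MGMT     (tx status workaround)
+ *   TDLS data                     -> ETHERNET (this key workaround)
+ *   raw mode flag or F_RAW_TX     -> RAW
+ *   anything else                 -> NATIVE_WIFI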
+ */ + if (ieee80211_is_data_present(fc) && sta && sta->tdls) + return ATH10K_HW_TXRX_ETHERNET; + + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) || + skb_cb->flags & ATH10K_SKB_F_RAW_TX) + return ATH10K_HW_TXRX_RAW; + + return ATH10K_HW_TXRX_NATIVE_WIFI; +} + +static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, + struct sk_buff *skb) +{ + const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + const struct ieee80211_hdr *hdr = (void *)skb->data; + const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | + IEEE80211_TX_CTL_INJECTED; + + if (!ieee80211_has_protected(hdr->frame_control)) + return false; + + if ((info->flags & mask) == mask) + return false; + + if (vif) + return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; + + return true; +} + +/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS + * Control in the header. + */ +static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + u8 *qos_ctl; + + if (!ieee80211_is_data_qos(hdr->frame_control)) return; - } - arvif->def_wep_key_index = key->keyidx; + qos_ctl = ieee80211_get_qos_ctl(hdr); + memmove(skb->data + IEEE80211_QOS_CTL_LEN, + skb->data, (void *)qos_ctl - (void *)skb->data); + skb_pull(skb, IEEE80211_QOS_CTL_LEN); + + /* Some firmware revisions don't handle sending QoS NullFunc well. + * These frames are mainly used for CQM purposes so it doesn't really + * matter whether QoS NullFunc or NullFunc are sent. + */ + hdr = (void *)skb->data; + if (ieee80211_is_qos_nullfunc(hdr->frame_control)) + cb->flags &= ~ATH10K_SKB_F_QOS; + + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); +} + +static void ath10k_tx_h_8023(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + struct rfc1042_hdr *rfc1042; + struct ethhdr *eth; + size_t hdrlen; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + __be16 type; + + hdr = (void *)skb->data; + hdrlen = ieee80211_hdrlen(hdr->frame_control); + rfc1042 = (void *)skb->data + hdrlen; + + ether_addr_copy(da, ieee80211_get_DA(hdr)); + ether_addr_copy(sa, ieee80211_get_SA(hdr)); + type = rfc1042->snap_type; + + skb_pull(skb, hdrlen + sizeof(*rfc1042)); + skb_push(skb, sizeof(*eth)); + + eth = (void *)skb->data; + ether_addr_copy(eth->h_dest, da); + ether_addr_copy(eth->h_source, sa); + eth->h_proto = type; } -static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb) +static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, + struct ieee80211_vif *vif, + struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_vif *vif = info->control.vif; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; /* This is case only for P2P_GO */ - if (arvif->vdev_type != WMI_VDEV_TYPE_AP || - arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) + if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) return; if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { @@ -1384,33 +3884,230 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb) if (arvif->u.ap.noa_data) if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, GFP_ATOMIC)) - memcpy(skb_put(skb, arvif->u.ap.noa_len), - arvif->u.ap.noa_data, - arvif->u.ap.noa_len); + skb_put_data(skb, arvif->u.ap.noa_data, + arvif->u.ap.noa_len); spin_unlock_bh(&ar->data_lock); } } -static void ath10k_tx_htt(struct ath10k *ar, 
struct sk_buff *skb) +static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_txq *txq, + struct ieee80211_sta *sta, + struct sk_buff *skb, u16 airtime) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - int ret; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool is_data = ieee80211_is_data(hdr->frame_control) || + ieee80211_is_data_qos(hdr->frame_control); + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_sta *arsta; + u8 tid, *qos_ctl; + bool noack = false; + + cb->flags = 0; + + if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { + cb->flags |= ATH10K_SKB_F_QOS; /* Assume data frames are QoS */ + goto finish_cb_fill; + } + + if (!ath10k_tx_h_use_hwcrypto(vif, skb)) + cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; if (ieee80211_is_mgmt(hdr->frame_control)) - ret = ath10k_htt_mgmt_tx(ar->htt, skb); - else if (ieee80211_is_nullfunc(hdr->frame_control)) - /* FW does not report tx status properly for NullFunc frames - * unless they are sent through mgmt tx path. mac80211 sends - * those frames when it detects link/beacon loss and depends on - * the tx status to be correct. */ - ret = ath10k_htt_mgmt_tx(ar->htt, skb); - else - ret = ath10k_htt_tx(ar->htt, skb); + cb->flags |= ATH10K_SKB_F_MGMT; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + cb->flags |= ATH10K_SKB_F_QOS; + qos_ctl = ieee80211_get_qos_ctl(hdr); + tid = (*qos_ctl) & IEEE80211_QOS_CTL_TID_MASK; + + if (arvif->noack[tid] == WMI_PEER_TID_CONFIG_NOACK) + noack = true; + + if (sta) { + arsta = (struct ath10k_sta *)sta->drv_priv; + + if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_NOACK) + noack = true; + + if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_ACK) + noack = false; + } + + if (noack) + cb->flags |= ATH10K_SKB_F_NOACK_TID; + } + + /* Data frames encrypted in software will be posted to firmware + * with tx encap mode set to RAW. Ex: Multicast traffic generated + * for a specific VLAN group will always be encrypted in software. + */ + if (is_data && ieee80211_has_protected(hdr->frame_control) && + !info->control.hw_key) { + cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; + cb->flags |= ATH10K_SKB_F_RAW_TX; + } + +finish_cb_fill: + cb->vif = vif; + cb->txq = txq; + cb->airtime_est = airtime; + if (sta) { + arsta = (struct ath10k_sta *)sta->drv_priv; + spin_lock_bh(&ar->data_lock); + cb->ucast_cipher = arsta->ucast_cipher; + spin_unlock_bh(&ar->data_lock); + } +} + +bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) +{ + /* FIXME: Not really sure since when the behaviour changed. At some + * point new firmware stopped requiring creation of peer entries for + * offchannel tx (and actually creating them causes issues with wmi-htc + * tx credit replenishment and reliability). Assuming it's at least 3.4 + * because that's when the `freq` was introduced to TX_FRM HTT command. 
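+ *
+ * (Consequence, per ath10k_mac_tx() below: when this returns false an
+ * offchannel frame is diverted to the offchan_tx_work path, which must
+ * create a temporary peer first; when true it is submitted directly,
+ * presumably with the frequency carried in the HTT command.)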
+ */ + return (ar->htt.target_version_major >= 3 && + ar->htt.target_version_minor >= 4 && + ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); +} + +static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) +{ + struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; + + if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) { + ath10k_warn(ar, "wmi mgmt tx queue is full\n"); + return -ENOSPC; + } + + skb_queue_tail(q, skb); + ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); + + return 0; +} + +static enum ath10k_mac_tx_path +ath10k_mac_tx_h_get_txpath(struct ath10k *ar, + struct sk_buff *skb, + enum ath10k_hw_txrx_mode txmode) +{ + switch (txmode) { + case ATH10K_HW_TXRX_RAW: + case ATH10K_HW_TXRX_NATIVE_WIFI: + case ATH10K_HW_TXRX_ETHERNET: + return ATH10K_MAC_TX_HTT; + case ATH10K_HW_TXRX_MGMT: + if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, + ar->running_fw->fw_file.fw_features) || + test_bit(WMI_SERVICE_MGMT_TX_WMI, + ar->wmi.svc_map)) + return ATH10K_MAC_TX_WMI_MGMT; + else if (ar->htt.target_version_major >= 3) + return ATH10K_MAC_TX_HTT; + else + return ATH10K_MAC_TX_HTT_MGMT; + } + + return ATH10K_MAC_TX_UNKNOWN; +} + +static int ath10k_mac_tx_submit(struct ath10k *ar, + enum ath10k_hw_txrx_mode txmode, + enum ath10k_mac_tx_path txpath, + struct sk_buff *skb) +{ + struct ath10k_htt *htt = &ar->htt; + int ret = -EINVAL; + + switch (txpath) { + case ATH10K_MAC_TX_HTT: + ret = ath10k_htt_tx(htt, txmode, skb); + break; + case ATH10K_MAC_TX_HTT_MGMT: + ret = ath10k_htt_mgmt_tx(htt, skb); + break; + case ATH10K_MAC_TX_WMI_MGMT: + ret = ath10k_mac_tx_wmi_mgmt(ar, skb); + break; + case ATH10K_MAC_TX_UNKNOWN: + WARN_ON_ONCE(1); + ret = -EINVAL; + break; + } if (ret) { - ath10k_warn("tx failed (%d). dropping packet.\n", ret); + ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", + ret); ieee80211_free_txskb(ar->hw, skb); } + + return ret; +} + +/* This function consumes the sk_buff regardless of return value as far as + * caller is concerned so no freeing is necessary afterwards. + */ +static int ath10k_mac_tx(struct ath10k *ar, + struct ieee80211_vif *vif, + enum ath10k_hw_txrx_mode txmode, + enum ath10k_mac_tx_path txpath, + struct sk_buff *skb, bool noque_offchan) +{ + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); + int ret; + + /* We should disable CCK RATE due to P2P */ + if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) + ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); + + switch (txmode) { + case ATH10K_HW_TXRX_MGMT: + case ATH10K_HW_TXRX_NATIVE_WIFI: + ath10k_tx_h_nwifi(hw, skb); + ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); + ath10k_tx_h_seq_no(vif, skb); + break; + case ATH10K_HW_TXRX_ETHERNET: + /* Convert 802.11->802.3 header only if the frame was earlier + * encapsulated to 802.11 by mac80211. Otherwise pass it as is. 
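+		 * ath10k_tx_h_8023() above rebuilds an Ethernet header from
+		 * the 802.11 DA/SA and the SNAP ethertype and strips the
+		 * 802.11 plus RFC 1042 headers.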
+ */ + if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) + ath10k_tx_h_8023(skb); + break; + case ATH10K_HW_TXRX_RAW: + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) && + !(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) { + WARN_ON_ONCE(1); + ieee80211_free_txskb(hw, skb); + return -EOPNOTSUPP; + } + } + + if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + if (!ath10k_mac_tx_frm_has_freq(ar)) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %p len %d\n", + skb, skb->len); + + skb_queue_tail(&ar->offchan_tx_queue, skb); + ieee80211_queue_work(hw, &ar->offchan_tx_work); + return 0; + } + } + + ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); + if (ret) { + ath10k_warn(ar, "failed to submit frame: %d\n", ret); + return ret; + } + + return 0; } void ath10k_offchan_tx_purge(struct ath10k *ar) @@ -1430,18 +4127,26 @@ void ath10k_offchan_tx_work(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); struct ath10k_peer *peer; + struct ath10k_vif *arvif; + enum ath10k_hw_txrx_mode txmode; + enum ath10k_mac_tx_path txpath; struct ieee80211_hdr *hdr; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; struct sk_buff *skb; const u8 *peer_addr; int vdev_id; int ret; + unsigned long time_left; + bool tmp_peer_created = false; /* FW requirement: We must create a peer before FW will send out * an offchannel frame. Otherwise the frame will be stuck and * never transmitted. We delete the peer upon tx completion. * It is unlikely that a peer for offchannel tx will already be * present. However it may be in some rare cases so account for that. - * Otherwise we might remove a legitimate peer and break stuff. */ + * Otherwise we might remove a legitimate peer and break stuff. + */ for (;;) { skb = skb_dequeue(&ar->offchan_tx_queue); @@ -1450,45 +4155,68 @@ void ath10k_offchan_tx_work(struct work_struct *work) mutex_lock(&ar->conf_mutex); - ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n", - skb); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p len %d\n", + skb, skb->len); hdr = (struct ieee80211_hdr *)skb->data; peer_addr = ieee80211_get_DA(hdr); - vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id; spin_lock_bh(&ar->data_lock); + vdev_id = ar->scan.vdev_id; peer = ath10k_peer_find(ar, vdev_id, peer_addr); spin_unlock_bh(&ar->data_lock); - if (peer) - ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", - peer_addr, vdev_id); - - if (!peer) { - ret = ath10k_peer_create(ar, vdev_id, peer_addr); + if (peer) { + ath10k_warn(ar, "peer %pM on vdev %d already present\n", + peer_addr, vdev_id); + } else { + ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, + peer_addr, + WMI_PEER_TYPE_DEFAULT); if (ret) - ath10k_warn("peer %pM on vdev %d not created (%d)\n", + ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", peer_addr, vdev_id, ret); + tmp_peer_created = (ret == 0); } spin_lock_bh(&ar->data_lock); - INIT_COMPLETION(ar->offchan_tx_completed); + reinit_completion(&ar->offchan_tx_completed); ar->offchan_tx_skb = skb; spin_unlock_bh(&ar->data_lock); - ath10k_tx_htt(ar, skb); + /* It's safe to access vif and sta - conf_mutex guarantees that + * sta_state() and remove_interface() are locked exclusively + * out wrt to this offchannel worker. 
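+	 * Both of those callbacks take conf_mutex themselves, so the vif
+	 * and sta looked up below cannot go away while this worker runs.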
+ */ + arvif = ath10k_get_arvif(ar, vdev_id); + if (arvif) { + vif = arvif->vif; + sta = ieee80211_find_sta(vif, peer_addr); + } else { + vif = NULL; + sta = NULL; + } - ret = wait_for_completion_timeout(&ar->offchan_tx_completed, - 3 * HZ); - if (ret <= 0) - ath10k_warn("timed out waiting for offchannel skb %p\n", - skb); + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); - if (!peer) { + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true); + if (ret) { + ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", + ret); + /* not serious */ + } + + time_left = + wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); + if (time_left == 0) + ath10k_warn(ar, "timed out waiting for offchannel skb %p, len: %d\n", + skb, skb->len); + + if (!peer && tmp_peer_created) { ret = ath10k_peer_delete(ar, vdev_id, peer_addr); if (ret) - ath10k_warn("peer %pM on vdev %d not deleted (%d)\n", + ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", peer_addr, vdev_id, ret); } @@ -1496,38 +4224,342 @@ void ath10k_offchan_tx_work(struct work_struct *work) } } -/************/ -/* Scanning */ -/************/ +void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) +{ + struct sk_buff *skb; -/* - * This gets called if we dont get a heart-beat during scan. - * This may indicate the FW has hung and we need to abort the - * scan manually to prevent cancel_hw_scan() from deadlocking + for (;;) { + skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); + if (!skb) + break; + + ieee80211_free_txskb(ar->hw, skb); + } +} + +void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); + struct sk_buff *skb; + dma_addr_t paddr; + int ret; + + for (;;) { + skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); + if (!skb) + break; + + if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, + ar->running_fw->fw_file.fw_features)) { + paddr = dma_map_single(ar->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(ar->dev, paddr)) { + ieee80211_free_txskb(ar->hw, skb); + continue; + } + ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr); + if (ret) { + ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", + ret); + /* remove this msdu from idr tracking */ + ath10k_wmi_cleanup_mgmt_tx_send(ar, skb); + + dma_unmap_single(ar->dev, paddr, skb->len, + DMA_TO_DEVICE); + ieee80211_free_txskb(ar->hw, skb); + } + } else { + ret = ath10k_wmi_mgmt_tx(ar, skb); + if (ret) { + ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", + ret); + ieee80211_free_txskb(ar->hw, skb); + } + } + } +} + +static void ath10k_mac_txq_init(struct ieee80211_txq *txq) +{ + struct ath10k_txq *artxq; + + if (!txq) + return; + + artxq = (void *)txq->drv_priv; + INIT_LIST_HEAD(&artxq->list); +} + +static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) +{ + struct ath10k_skb_cb *cb; + struct sk_buff *msdu; + int msdu_id; + + if (!txq) + return; + + spin_lock_bh(&ar->htt.tx_lock); + idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { + cb = ATH10K_SKB_CB(msdu); + if (cb->txq == txq) + cb->txq = NULL; + } + spin_unlock_bh(&ar->htt.tx_lock); +} + +struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, + u16 peer_id, + u8 tid) +{ + struct ath10k_peer *peer; + + lockdep_assert_held(&ar->data_lock); + + peer = ar->peer_map[peer_id]; + if (!peer) + return NULL; + + if (peer->removed) + return NULL; + + if (peer->sta) + return peer->sta->txq[tid]; + else if 
(peer->vif) + return peer->vif->txq; + else + return NULL; +} + +static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + struct ath10k_txq *artxq = (void *)txq->drv_priv; + + /* No need to get locks */ + if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) + return true; + + if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) + return true; + + if (artxq->num_fw_queued < artxq->num_push_allowed) + return true; + + return false; +} + +/* Return estimated airtime in microsecond, which is calculated using last + * reported TX rate. This is just a rough estimation because host driver has no + * knowledge of the actual transmit rate, retries or aggregation. If actual + * airtime can be reported by firmware, then delta between estimated and actual + * airtime can be adjusted from deficit. */ -void ath10k_reset_scan(unsigned long ptr) +#define IEEE80211_ATF_OVERHEAD 100 /* IFS + some slot time */ +#define IEEE80211_ATF_OVERHEAD_IFS 16 /* IFS only */ +static u16 ath10k_mac_update_airtime(struct ath10k *ar, + struct ieee80211_txq *txq, + struct sk_buff *skb) { - struct ath10k *ar = (struct ath10k *)ptr; + struct ath10k_sta *arsta; + u32 pktlen; + u16 airtime = 0; + + if (!txq || !txq->sta) + return airtime; + + if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) + return airtime; spin_lock_bh(&ar->data_lock); - if (!ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); + arsta = (struct ath10k_sta *)txq->sta->drv_priv; + + pktlen = skb->len + 38; /* Assume MAC header 30, SNAP 8 for most case */ + if (arsta->last_tx_bitrate) { + /* airtime in us, last_tx_bitrate in 100kbps */ + airtime = (pktlen * 8 * (1000 / 100)) + / arsta->last_tx_bitrate; + /* overhead for media access time and IFS */ + airtime += IEEE80211_ATF_OVERHEAD_IFS; + } else { + /* This is mostly for throttle excessive BC/MC frames, and the + * airtime/rate doesn't need be exact. Airtime of BC/MC frames + * in 2G get some discount, which helps prevent very low rate + * frames from being blocked for too long. 
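+	 *
+	 * Worked example at the assumed 6 Mbit/s (60 * 100kbps) fallback
+	 * rate: a 1500 byte MSDU gives pktlen = 1538, so airtime =
+	 * (1538 * 8 * 10) / 60 ~= 2050 us, plus IEEE80211_ATF_OVERHEAD.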
+ */ + airtime = (pktlen * 8 * (1000 / 100)) / 60; /* 6M */ + airtime += IEEE80211_ATF_OVERHEAD; + } + spin_unlock_bh(&ar->data_lock); + + return airtime; +} + +int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct ath10k *ar = hw->priv; + struct ath10k_htt *htt = &ar->htt; + struct ath10k_txq *artxq = (void *)txq->drv_priv; + struct ieee80211_vif *vif = txq->vif; + struct ieee80211_sta *sta = txq->sta; + enum ath10k_hw_txrx_mode txmode; + enum ath10k_mac_tx_path txpath; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + size_t skb_len; + bool is_mgmt, is_presp; + int ret; + u16 airtime; + + spin_lock_bh(&ar->htt.tx_lock); + ret = ath10k_htt_tx_inc_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + + if (ret) + return ret; + + skb = ieee80211_tx_dequeue_ni(hw, txq); + if (!skb) { + spin_lock_bh(&ar->htt.tx_lock); + ath10k_htt_tx_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + + return -ENOENT; + } + + airtime = ath10k_mac_update_airtime(ar, txq, skb); + ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime); + + skb_len = skb->len; + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); + is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); + + if (is_mgmt) { + hdr = (struct ieee80211_hdr *)skb->data; + is_presp = ieee80211_is_probe_resp(hdr->frame_control); + + spin_lock_bh(&ar->htt.tx_lock); + ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); + + if (ret) { + ath10k_htt_tx_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + return ret; + } + spin_unlock_bh(&ar->htt.tx_lock); + } + + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false); + if (unlikely(ret)) { + ath10k_warn(ar, "failed to push frame: %d\n", ret); + + spin_lock_bh(&ar->htt.tx_lock); + ath10k_htt_tx_dec_pending(htt); + if (is_mgmt) + ath10k_htt_tx_mgmt_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + + return ret; + } + + spin_lock_bh(&ar->htt.tx_lock); + artxq->num_fw_queued++; + spin_unlock_bh(&ar->htt.tx_lock); + + return skb_len; +} + +static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac) +{ + struct ieee80211_txq *txq; + int ret = 0; + + ieee80211_txq_schedule_start(hw, ac); + while ((txq = ieee80211_next_txq(hw, ac))) { + while (ath10k_mac_tx_can_push(hw, txq)) { + ret = ath10k_mac_tx_push_txq(hw, txq); + if (ret < 0) + break; + } + ieee80211_return_txq(hw, txq, false); + ath10k_htt_tx_txq_update(hw, txq); + if (ret == -EBUSY) + break; + } + ieee80211_txq_schedule_end(hw, ac); + + return ret; +} + +void ath10k_mac_tx_push_pending(struct ath10k *ar) +{ + struct ieee80211_hw *hw = ar->hw; + u32 ac; + + if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH) + return; + + if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) return; + + rcu_read_lock(); + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + if (ath10k_mac_schedule_txq(hw, ac) == -EBUSY) + break; } + rcu_read_unlock(); +} +EXPORT_SYMBOL(ath10k_mac_tx_push_pending); - ath10k_warn("scan timeout. resetting. 
fw issue?\n"); +/************/ +/* Scanning */ +/************/ - if (ar->scan.is_roc) - ieee80211_remain_on_channel_expired(ar->hw); - else - ieee80211_scan_completed(ar->hw, 1 /* aborted */); +void __ath10k_scan_finish(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + if (ar->scan.is_roc && ar->scan.roc_notify) + ieee80211_remain_on_channel_expired(ar->hw); + fallthrough; + case ATH10K_SCAN_STARTING: + if (!ar->scan.is_roc) { + struct cfg80211_scan_info info = { + .aborted = ((ar->scan.state == + ATH10K_SCAN_ABORTING) || + (ar->scan.state == + ATH10K_SCAN_STARTING)), + }; + + ieee80211_scan_completed(ar->hw, &info); + } + + ar->scan.state = ATH10K_SCAN_IDLE; + ar->scan_channel = NULL; + ar->scan.roc_freq = 0; + ath10k_offchan_tx_purge(ar); + cancel_delayed_work(&ar->scan.timeout); + complete(&ar->scan.completed); + break; + } +} - ar->scan.in_progress = false; - complete_all(&ar->scan.completed); +void ath10k_scan_finish(struct ath10k *ar) +{ + spin_lock_bh(&ar->data_lock); + __ath10k_scan_finish(ar); spin_unlock_bh(&ar->data_lock); } -static int ath10k_abort_scan(struct ath10k *ar) +static int ath10k_scan_stop(struct ath10k *ar) { struct wmi_stop_scan_arg arg = { .req_id = 1, /* FIXME */ @@ -1538,45 +4570,79 @@ static int ath10k_abort_scan(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); - del_timer_sync(&ar->scan.timeout); + ret = ath10k_wmi_stop_scan(ar, &arg); + if (ret) { + ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); + goto out; + } - spin_lock_bh(&ar->data_lock); - if (!ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); - return 0; + ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); + if (ret == 0) { + ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); + ret = -ETIMEDOUT; + } else if (ret > 0) { + ret = 0; } - ar->scan.aborting = true; +out: + /* Scan state should be updated upon scan completion but in case + * firmware fails to deliver the event (for whatever reason) it is + * desired to clean up scan state anyway. Firmware may have just + * dropped the scan completion event delivery due to transport pipe + * being overflown with data and/or it can recover on its own before + * next scan request is submitted. + */ + spin_lock_bh(&ar->data_lock); + if (ar->scan.state != ATH10K_SCAN_IDLE) + __ath10k_scan_finish(ar); spin_unlock_bh(&ar->data_lock); - ret = ath10k_wmi_stop_scan(ar, &arg); - if (ret) { - ath10k_warn("could not submit wmi stop scan (%d)\n", ret); - return -EIO; - } - - ath10k_wmi_flush_tx(ar); + return ret; +} - ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); - if (ret == 0) - ath10k_warn("timed out while waiting for scan to stop\n"); +static void ath10k_scan_abort(struct ath10k *ar) +{ + int ret; - /* scan completion may be done right after we timeout here, so let's - * check the in_progress and tell mac80211 scan is completed. if we - * don't do that and FW fails to send us scan completion indication - * then userspace won't be able to scan anymore */ - ret = 0; + lockdep_assert_held(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); - if (ar->scan.in_progress) { - ath10k_warn("could not stop scan. 
its still in progress\n"); - ar->scan.in_progress = false; - ath10k_offchan_tx_purge(ar); - ret = -ETIMEDOUT; + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + /* This can happen if timeout worker kicked in and called + * abortion while scan completion was being processed. + */ + break; + case ATH10K_SCAN_STARTING: + case ATH10K_SCAN_ABORTING: + ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + ar->scan.state = ATH10K_SCAN_ABORTING; + spin_unlock_bh(&ar->data_lock); + + ret = ath10k_scan_stop(ar); + if (ret) + ath10k_warn(ar, "failed to abort scan: %d\n", ret); + + spin_lock_bh(&ar->data_lock); + break; } + spin_unlock_bh(&ar->data_lock); +} - return ret; +void ath10k_scan_timeout_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, + scan.timeout.work); + + mutex_lock(&ar->conf_mutex); + ath10k_scan_abort(ar); + mutex_unlock(&ar->conf_mutex); } static int ath10k_start_scan(struct ath10k *ar, @@ -1590,23 +4656,26 @@ static int ath10k_start_scan(struct ath10k *ar, if (ret) return ret; - /* make sure we submit the command so the completion - * timeout makes sense */ - ath10k_wmi_flush_tx(ar); - - ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); + ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); if (ret == 0) { - ath10k_abort_scan(ar); - return ret; + ret = ath10k_scan_stop(ar); + if (ret) + ath10k_warn(ar, "failed to stop scan: %d\n", ret); + + return -ETIMEDOUT; + } + + /* If we failed to start the scan, return error code at + * this point. This is probably due to some issue in the + * firmware, but no need to wedge the driver due to that... + */ + spin_lock_bh(&ar->data_lock); + if (ar->scan.state == ATH10K_SCAN_IDLE) { + spin_unlock_bh(&ar->data_lock); + return -EINVAL; } + spin_unlock_bh(&ar->data_lock); - /* the scan can complete earlier, before we even - * start the timer. in that case the timer handler - * checks ar->scan.in_progress and bails out if its - * false. Add a 200ms margin to account event/command - * processing. 
*/ - mod_timer(&ar->scan.timeout, jiffies + - msecs_to_jiffies(arg->max_scan_time+200)); return 0; } @@ -1614,134 +4683,869 @@ static int ath10k_start_scan(struct ath10k *ar, /* mac80211 callbacks */ /**********************/ -static void ath10k_tx(struct ieee80211_hw *hw, - struct ieee80211_tx_control *control, - struct sk_buff *skb) +static void ath10k_mac_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { + struct ath10k *ar = hw->priv; + struct ath10k_htt *htt = &ar->htt; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_vif *vif = info->control.vif; + struct ieee80211_sta *sta = control->sta; + struct ieee80211_txq *txq = NULL; + enum ath10k_hw_txrx_mode txmode; + enum ath10k_mac_tx_path txpath; + bool is_htt; + bool is_mgmt; + int ret; + u16 airtime; + + airtime = ath10k_mac_update_airtime(ar, txq, skb); + ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime); + + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); + is_htt = (txpath == ATH10K_MAC_TX_HTT || + txpath == ATH10K_MAC_TX_HTT_MGMT); + is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); + + if (is_htt) { + bool is_presp = false; + + spin_lock_bh(&ar->htt.tx_lock); + if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) { + struct ieee80211_hdr *hdr = (void *)skb->data; + + is_presp = ieee80211_is_probe_resp(hdr->frame_control); + } + + ret = ath10k_htt_tx_inc_pending(htt); + if (ret) { + ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", + ret); + spin_unlock_bh(&ar->htt.tx_lock); + ieee80211_free_txskb(ar->hw, skb); + return; + } + + ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", + ret); + ath10k_htt_tx_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + ieee80211_free_txskb(ar->hw, skb); + return; + } + spin_unlock_bh(&ar->htt.tx_lock); + } + + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false); + if (ret) { + ath10k_warn(ar, "failed to transmit frame: %d\n", ret); + if (is_htt) { + spin_lock_bh(&ar->htt.tx_lock); + ath10k_htt_tx_dec_pending(htt); + if (is_mgmt) + ath10k_htt_tx_mgmt_dec_pending(htt); + spin_unlock_bh(&ar->htt.tx_lock); + } + return; + } +} + +static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = NULL; - u32 vdev_id = 0; - u8 tid; + int ret; + u8 ac = txq->ac; + + ath10k_htt_tx_txq_update(hw, txq); + if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH) + return; + + spin_lock_bh(&ar->queue_lock[ac]); - if (info->control.vif) { - arvif = ath10k_vif_to_arvif(info->control.vif); - vdev_id = arvif->vdev_id; - } else if (ar->monitor_enabled) { - vdev_id = ar->monitor_vdev_id; + ieee80211_txq_schedule_start(hw, ac); + txq = ieee80211_next_txq(hw, ac); + if (!txq) + goto out; + + while (ath10k_mac_tx_can_push(hw, txq)) { + ret = ath10k_mac_tx_push_txq(hw, txq); + if (ret < 0) + break; } + ieee80211_return_txq(hw, txq, false); + ath10k_htt_tx_txq_update(hw, txq); +out: + ieee80211_txq_schedule_end(hw, ac); + spin_unlock_bh(&ar->queue_lock[ac]); +} - /* We should disable CCK RATE due to P2P */ - if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) - ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); +/* Must not be called with conf_mutex held as workers can use that 
also. */ +void ath10k_drain_tx(struct ath10k *ar) +{ + lockdep_assert_not_held(&ar->conf_mutex); + + /* make sure rcu-protected mac80211 tx path itself is drained */ + synchronize_net(); + + ath10k_offchan_tx_purge(ar); + ath10k_mgmt_over_wmi_tx_purge(ar); + + cancel_work_sync(&ar->offchan_tx_work); + cancel_work_sync(&ar->wmi_mgmt_tx_work); +} + +void ath10k_halt(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + + lockdep_assert_held(&ar->conf_mutex); + + clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); + ar->filter_flags = 0; + ar->monitor = false; + ar->monitor_arvif = NULL; + + if (ar->monitor_started) + ath10k_monitor_stop(ar); + + ar->monitor_started = false; + ar->tx_paused = 0; + + ath10k_scan_finish(ar); + ath10k_peer_cleanup_all(ar); + ath10k_stop_radar_confirmation(ar); + ath10k_core_stop(ar); + ath10k_hif_power_down(ar); + + spin_lock_bh(&ar->data_lock); + list_for_each_entry(arvif, &ar->arvifs, list) + ath10k_mac_vif_beacon_cleanup(arvif); + spin_unlock_bh(&ar->data_lock); +} + +static int ath10k_get_antenna(struct ieee80211_hw *hw, int radio_idx, + u32 *tx_ant, u32 *rx_ant) +{ + struct ath10k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + + *tx_ant = ar->cfg_tx_chainmask; + *rx_ant = ar->cfg_rx_chainmask; + + mutex_unlock(&ar->conf_mutex); - /* we must calculate tid before we apply qos workaround - * as we'd lose the qos control field */ - tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; - if (ieee80211_is_data_qos(hdr->frame_control) && - is_unicast_ether_addr(ieee80211_get_DA(hdr))) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + return 0; +} + +static bool ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) +{ + /* It is not clear that allowing gaps in chainmask + * is helpful. Probably it will not do what user + * is hoping for, so warn in that case. + */ + if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) + return true; + + ath10k_warn(ar, "mac %s antenna chainmask is invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", + dbg, cm); + return false; +} + +static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) +{ + int nsts = ar->vht_cap_info; + + nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; + nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + + /* If firmware does not deliver to host number of space-time + * streams supported, assume it support up to 4 BF STS and return + * the value for VHT CAP: nsts-1) + */ + if (nsts == 0) + return 3; + + return nsts; +} + +static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar) +{ + int sound_dim = ar->vht_cap_info; + + sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; + sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; + + /* If the sounding dimension is not advertised by the firmware, + * let's use a default value of 1 + */ + if (sound_dim == 0) + return 1; + + return sound_dim; +} + +static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) +{ + struct ieee80211_sta_vht_cap vht_cap = {}; + struct ath10k_hw_params *hw = &ar->hw_params; + u16 mcs_map; + u32 val; + int i; + + vht_cap.vht_supported = 1; + vht_cap.cap = ar->vht_cap_info; + + if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { + val = ath10k_mac_get_vht_cap_bf_sts(ar); + val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; + + vht_cap.cap |= val; } - ath10k_tx_h_qos_workaround(hw, control, skb); - ath10k_tx_h_update_wep_key(skb); - ath10k_tx_h_add_p2p_noa_ie(ar, skb); - ath10k_tx_h_seq_no(skb); + if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { + val = ath10k_mac_get_vht_cap_bf_sound_dim(ar); + val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; + val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; - memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb))); - ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id; - ATH10K_SKB_CB(skb)->htt.tid = tid; + vht_cap.cap |= val; + } - if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { - spin_lock_bh(&ar->data_lock); - ATH10K_SKB_CB(skb)->htt.is_offchan = true; - ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id; - spin_unlock_bh(&ar->data_lock); + mcs_map = 0; + for (i = 0; i < 8; i++) { + if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) + mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); + else + mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); + } - ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb); + if (ar->cfg_tx_chainmask <= 1) + vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; - skb_queue_tail(&ar->offchan_tx_queue, skb); - ieee80211_queue_work(hw, &ar->offchan_tx_work); - return; + vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); + vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); + + /* If we are supporting 160Mhz or 80+80, then the NIC may be able to do + * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz. Give + * user-space a clue if that is the case. + */ + if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) && + (hw->vht160_mcs_rx_highest != 0 || + hw->vht160_mcs_tx_highest != 0)) { + vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest); + vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest); } - ath10k_tx_htt(ar, skb); + return vht_cap; } -/* - * Initialize various parameters with default vaules. 
- */ -static int ath10k_start(struct ieee80211_hw *hw) +static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) +{ + int i; + struct ieee80211_sta_ht_cap ht_cap = {}; + + if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) + return ht_cap; + + ht_cap.ht_supported = 1; + ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; + ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; + ht_cap.cap |= + WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; + + if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) + ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; + + if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) + ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + + if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { + u32 smps; + + smps = WLAN_HT_CAP_SM_PS_DYNAMIC; + smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; + + ht_cap.cap |= smps; + } + + if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) + ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; + + if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { + u32 stbc; + + stbc = ar->ht_cap_info; + stbc &= WMI_HT_CAP_RX_STBC; + stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; + stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; + stbc &= IEEE80211_HT_CAP_RX_STBC; + + ht_cap.cap |= stbc; + } + + if (ar->ht_cap_info & WMI_HT_CAP_LDPC || (ar->ht_cap_info & + WMI_HT_CAP_RX_LDPC && (ar->ht_cap_info & WMI_HT_CAP_TX_LDPC))) + ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + + if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) + ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; + + /* max AMSDU is implicitly taken from vht_cap_info */ + if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) + ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + for (i = 0; i < ar->num_rf_chains; i++) { + if (ar->cfg_rx_chainmask & BIT(i)) + ht_cap.mcs.rx_mask[i] = 0xFF; + } + + ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; + + return ht_cap; +} + +static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) +{ + struct ieee80211_supported_band *band; + struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_sta_ht_cap ht_cap; + + ht_cap = ath10k_get_ht_cap(ar); + vht_cap = ath10k_create_vht_cap(ar); + + if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { + band = &ar->mac.sbands[NL80211_BAND_2GHZ]; + band->ht_cap = ht_cap; + } + if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { + band = &ar->mac.sbands[NL80211_BAND_5GHZ]; + band->ht_cap = ht_cap; + band->vht_cap = vht_cap; + } +} + +static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) +{ + int ret; + bool is_valid_tx_chain_mask, is_valid_rx_chain_mask; + + lockdep_assert_held(&ar->conf_mutex); + + is_valid_tx_chain_mask = ath10k_check_chain_mask(ar, tx_ant, "tx"); + is_valid_rx_chain_mask = ath10k_check_chain_mask(ar, rx_ant, "rx"); + + if (!is_valid_tx_chain_mask || !is_valid_rx_chain_mask) + return -EINVAL; + + ar->cfg_tx_chainmask = tx_ant; + ar->cfg_rx_chainmask = rx_ant; + + if ((ar->state != ATH10K_STATE_ON) && + (ar->state != ATH10K_STATE_RESTARTED)) + return 0; + + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, + tx_ant); + if (ret) { + ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n", + ret, tx_ant); + return ret; + } + + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, + rx_ant); + if (ret) { + ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", + ret, rx_ant); + return ret; + } + + /* Reload HT/VHT capability */ + ath10k_mac_setup_ht_vht_cap(ar); + + return 0; +} + +static int 
ath10k_set_antenna(struct ieee80211_hw *hw, int radio_idx, + u32 tx_ant, u32 rx_ant) { struct ath10k *ar = hw->priv; int ret; - ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1); - if (ret) - ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", - ret); + mutex_lock(&ar->conf_mutex); + ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar, + struct wmi_bb_timing_cfg_arg *bb_timing) +{ + struct device_node *node; + const char *fem_name; + int ret; - ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0); + node = ar->dev->of_node; + if (!node) + return -ENOENT; + + ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name); if (ret) - ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", - ret); + return -ENOENT; + /* + * If external Front End module used in hardware, then default base band timing + * parameter cannot be used since they were fine tuned for reference hardware, + * so choosing different value suitable for that external FEM. + */ + if (!strcmp("microsemi-lx5586", fem_name)) { + bb_timing->bb_tx_timing = 0x00; + bb_timing->bb_xpa_timing = 0x0101; + } else { + return -ENOENT; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot bb_tx_timing 0x%x bb_xpa_timing 0x%x\n", + bb_timing->bb_tx_timing, bb_timing->bb_xpa_timing); return 0; } -static void ath10k_stop(struct ieee80211_hw *hw) +static int ath10k_mac_rfkill_config(struct ath10k *ar) { - struct ath10k *ar = hw->priv; + u32 param; + int ret; - /* avoid leaks in case FW never confirms scan for offchannel */ - cancel_work_sync(&ar->offchan_tx_work); - ath10k_offchan_tx_purge(ar); + if (ar->hw_values->rfkill_pin == 0) { + ath10k_warn(ar, "ath10k does not support hardware rfkill with this device\n"); + return -EOPNOTSUPP; + } + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d", + ar->hw_values->rfkill_pin, ar->hw_values->rfkill_cfg, + ar->hw_values->rfkill_on_level); + + param = FIELD_PREP(WMI_TLV_RFKILL_CFG_RADIO_LEVEL, + ar->hw_values->rfkill_on_level) | + FIELD_PREP(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM, + ar->hw_values->rfkill_pin) | + FIELD_PREP(WMI_TLV_RFKILL_CFG_PIN_AS_GPIO, + ar->hw_values->rfkill_cfg); + + ret = ath10k_wmi_pdev_set_param(ar, + ar->wmi.pdev_param->rfkill_config, + param); + if (ret) { + ath10k_warn(ar, + "failed to set rfkill config 0x%x: %d\n", + param, ret); + return ret; + } + return 0; } -static int ath10k_config(struct ieee80211_hw *hw, u32 changed) +int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable) +{ + enum wmi_tlv_rfkill_enable_radio param; + int ret; + + if (enable) + param = WMI_TLV_RFKILL_ENABLE_RADIO_ON; + else + param = WMI_TLV_RFKILL_ENABLE_RADIO_OFF; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac rfkill enable %d", param); + + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rfkill_enable, + param); + if (ret) { + ath10k_warn(ar, "failed to set rfkill enable param %d: %d\n", + param, ret); + return ret; + } + + return 0; +} + +static int ath10k_start(struct ieee80211_hw *hw) { - struct ath10k_generic_iter ar_iter; struct ath10k *ar = hw->priv; - struct ieee80211_conf *conf = &hw->conf; + u32 param; int ret = 0; - u32 flags; + struct wmi_bb_timing_cfg_arg bb_timing = {}; + + /* + * This makes sense only when restarting hw. It is harmless to call + * unconditionally. This is necessary to make sure no HTT/WMI tx + * commands will be submitted while restarting. 
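+	 * It also has to run before conf_mutex is taken: the workers it
+	 * cancels can take conf_mutex themselves (see ath10k_drain_tx()).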
+ */ + ath10k_drain_tx(ar); mutex_lock(&ar->conf_mutex); - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n", - conf->chandef.chan->center_freq); - spin_lock_bh(&ar->data_lock); - ar->rx_channel = conf->chandef.chan; + switch (ar->state) { + case ATH10K_STATE_OFF: + ar->state = ATH10K_STATE_ON; + break; + case ATH10K_STATE_RESTARTING: + ar->state = ATH10K_STATE_RESTARTED; + break; + case ATH10K_STATE_ON: + case ATH10K_STATE_RESTARTED: + case ATH10K_STATE_WEDGED: + WARN_ON(1); + ret = -EINVAL; + goto err; + case ATH10K_STATE_UTF: + ret = -EBUSY; + goto err; + } + + spin_lock_bh(&ar->data_lock); + + if (ar->hw_rfkill_on) { + ar->hw_rfkill_on = false; spin_unlock_bh(&ar->data_lock); + goto err; + } + + spin_unlock_bh(&ar->data_lock); + + ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL); + if (ret) { + ath10k_err(ar, "Could not init hif: %d\n", ret); + goto err_off; + } + + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, + &ar->normal_mode_fw); + if (ret) { + ath10k_err(ar, "Could not init core: %d\n", ret); + goto err_power_down; + } + + if (ar->sys_cap_info & WMI_TLV_SYS_CAP_INFO_RFKILL) { + ret = ath10k_mac_rfkill_config(ar); + if (ret && ret != -EOPNOTSUPP) { + ath10k_warn(ar, "failed to configure rfkill: %d", ret); + goto err_core_stop; + } + } + + param = ar->wmi.pdev_param->pmf_qos; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); + if (ret) { + ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); + goto err_core_stop; + } + + param = ar->wmi.pdev_param->dynamic_bw; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); + if (ret) { + ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); + goto err_core_stop; + } + + if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { + ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr); + if (ret) { + ath10k_err(ar, "failed to set prob req oui: %i\n", ret); + goto err_core_stop; + } + } + + if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { + ret = ath10k_wmi_adaptive_qcs(ar, true); + if (ret) { + ath10k_warn(ar, "failed to enable adaptive qcs: %d\n", + ret); + goto err_core_stop; + } + } + + if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { + param = ar->wmi.pdev_param->burst_enable; + ret = ath10k_wmi_pdev_set_param(ar, param, 0); + if (ret) { + ath10k_warn(ar, "failed to disable burst: %d\n", ret); + goto err_core_stop; + } + } + + param = ar->wmi.pdev_param->idle_ps_config; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); + if (ret && ret != -EOPNOTSUPP) { + ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret); + goto err_core_stop; } - if (changed & IEEE80211_CONF_CHANGE_PS) { - memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); - ar_iter.ar = ar; - flags = IEEE80211_IFACE_ITER_RESUME_ALL; + __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); - ieee80211_iterate_active_interfaces_atomic(hw, - flags, - ath10k_ps_iter, - &ar_iter); + /* + * By default FW set ARP frames ac to voice (6). In that case ARP + * exchange is not working properly for UAPSD enabled AP. ARP requests + * which arrives with access category 0 are processed by network stack + * and send back with access category 0, but FW changes access category + * to 6. Set ARP frames access category to best effort (0) solves + * this problem. 
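+	 * E.g. an ARP reply handed down by the network stack on AC 0 would
+	 * still go out on AC 6, and against a UAPSD enabled AP the exchange
+	 * can stall.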
+ */ - ret = ar_iter.ret; + param = ar->wmi.pdev_param->arp_ac_override; + ret = ath10k_wmi_pdev_set_param(ar, param, 0); + if (ret) { + ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", + ret); + goto err_core_stop; + } + + if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, + ar->running_fw->fw_file.fw_features)) { + ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, + WMI_CCA_DETECT_LEVEL_AUTO, + WMI_CCA_DETECT_MARGIN_AUTO); + if (ret) { + ath10k_warn(ar, "failed to enable adaptive cca: %d\n", + ret); + goto err_core_stop; + } + } + + param = ar->wmi.pdev_param->ani_enable; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); + if (ret) { + ath10k_warn(ar, "failed to enable ani by default: %d\n", + ret); + goto err_core_stop; } + ar->ani_enabled = true; + + if (ath10k_peer_stats_enabled(ar)) { + param = ar->wmi.pdev_param->peer_stats_update_period; + ret = ath10k_wmi_pdev_set_param(ar, param, + PEER_DEFAULT_STATS_UPDATE_PERIOD); + if (ret) { + ath10k_warn(ar, + "failed to set peer stats period : %d\n", + ret); + goto err_core_stop; + } + } + + param = ar->wmi.pdev_param->enable_btcoex; + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features) && + ar->coex_support) { + ret = ath10k_wmi_pdev_set_param(ar, param, 0); + if (ret) { + ath10k_warn(ar, + "failed to set btcoex param: %d\n", ret); + goto err_core_stop; + } + clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + } + + if (test_bit(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, ar->wmi.svc_map)) { + ret = __ath10k_fetch_bb_timing_dt(ar, &bb_timing); + if (!ret) { + ret = ath10k_wmi_pdev_bb_timing(ar, &bb_timing); + if (ret) { + ath10k_warn(ar, + "failed to set bb timings: %d\n", + ret); + goto err_core_stop; + } + } + } + + ar->num_started_vdevs = 0; + ath10k_regd_update(ar); + + ath10k_spectral_start(ar); + ath10k_thermal_set_throttling(ar); + + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE; + + mutex_unlock(&ar->conf_mutex); + return 0; + +err_core_stop: + ath10k_core_stop(ar); + +err_power_down: + ath10k_hif_power_down(ar); + +err_off: + ar->state = ATH10K_STATE_OFF; + +err: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static void ath10k_stop(struct ieee80211_hw *hw, bool suspend) +{ + struct ath10k *ar = hw->priv; + u32 opt; + + ath10k_drain_tx(ar); + + mutex_lock(&ar->conf_mutex); + if (ar->state != ATH10K_STATE_OFF) { + if (!ar->hw_rfkill_on) { + /* If the current driver state is RESTARTING but not yet + * fully RESTARTED because of incoming suspend event, + * then ath10k_halt() is already called via + * ath10k_core_restart() and should not be called here. + */ + if (ar->state != ATH10K_STATE_RESTARTING) { + ath10k_halt(ar); + } else { + /* Suspending here, because when in RESTARTING + * state, ath10k_core_stop() skips + * ath10k_wait_for_suspend(). 
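+	 * Suspend the target with interrupts disabled here instead, so it
+	 * is quiesced before the state is forced to OFF below.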
+ */ + opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR; + ath10k_wait_for_suspend(ar, opt); + } + } + ar->state = ATH10K_STATE_OFF; + } + mutex_unlock(&ar->conf_mutex); + + cancel_work_sync(&ar->set_coverage_class_work); + cancel_delayed_work_sync(&ar->scan.timeout); + cancel_work_sync(&ar->restart_work); + cancel_work_sync(&ar->recovery_check_work); +} + +static int ath10k_config_ps(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath10k_mac_vif_setup_ps(arvif); + if (ret) { + ath10k_warn(ar, "failed to setup powersave: %d\n", ret); + break; + } + } + + return ret; +} + +static int ath10k_config(struct ieee80211_hw *hw, int radio_idx, u32 changed) +{ + struct ath10k *ar = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + int ret = 0; + + mutex_lock(&ar->conf_mutex); + + if (changed & IEEE80211_CONF_CHANGE_PS) + ath10k_config_ps(ar); + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { - if (conf->flags & IEEE80211_CONF_MONITOR) - ret = ath10k_monitor_create(ar); - else - ret = ath10k_monitor_destroy(ar); + ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; + ret = ath10k_monitor_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); } mutex_unlock(&ar->conf_mutex); return ret; } +static u32 get_nss_from_chainmask(u16 chain_mask) +{ + if ((chain_mask & 0xf) == 0xf) + return 4; + else if ((chain_mask & 0x7) == 0x7) + return 3; + else if ((chain_mask & 0x3) == 0x3) + return 2; + return 1; +} + +static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) +{ + u32 value = 0; + struct ath10k *ar = arvif->ar; + int nsts; + int sound_dim; + + if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) + return 0; + + nsts = ath10k_mac_get_vht_cap_bf_sts(ar); + if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) + value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); + + sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar); + if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) + value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); + + if (!value) + return 0; + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) + value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | + WMI_VDEV_PARAM_TXBF_SU_TX_BFER); + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) + value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | + WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); + + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + ar->wmi.vdev_param->txbf, value); +} + +static void ath10k_update_vif_offload(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k *ar = hw->priv; + u32 vdev_param; + int ret; + + if (ath10k_frame_mode != ATH10K_HW_TXRX_ETHERNET || + ar->wmi.vdev_param->tx_encap_type == WMI_VDEV_PARAM_UNSUPPORTED || + (vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_AP)) + vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; + + vdev_param = ar->wmi.vdev_param->tx_encap_type; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + ATH10K_HW_TXRX_NATIVE_WIFI); + /* 10.X firmware does not support this VDEV parameter. 
Do not warn */ + if (ret && ret != -EOPNOTSUPP) { + ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", + arvif->vdev_id, ret); + } +} + /* * TODO: * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, @@ -1753,51 +5557,91 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_peer *peer; enum wmi_sta_powersave_param param; int ret = 0; u32 value; int bit; + int i; + u32 vdev_param; + + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; mutex_lock(&ar->conf_mutex); + memset(arvif, 0, sizeof(*arvif)); + ath10k_mac_txq_init(vif->txq); + arvif->ar = ar; arvif->vif = vif; - if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) { - ath10k_warn("Only one monitor interface allowed\n"); - ret = -EBUSY; - goto exit; + INIT_LIST_HEAD(&arvif->list); + INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); + INIT_DELAYED_WORK(&arvif->connection_loss_work, + ath10k_mac_vif_sta_connection_loss_work); + + for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { + arvif->bitrate_mask.control[i].legacy = 0xffffffff; + memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].ht_mcs)); + memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].vht_mcs)); + } + + if (ar->num_peers >= ar->max_num_peers) { + ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); + ret = -ENOBUFS; + goto err; } - bit = ffs(ar->free_vdev_map); - if (bit == 0) { + if (ar->free_vdev_map == 0) { + ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); ret = -EBUSY; - goto exit; + goto err; } + bit = __ffs64(ar->free_vdev_map); - arvif->vdev_id = bit - 1; - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; - ar->free_vdev_map &= ~(1 << arvif->vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", + bit, ar->free_vdev_map); - if (ar->p2p) - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; + arvif->vdev_id = bit; + arvif->vdev_subtype = + ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); switch (vif->type) { + case NL80211_IFTYPE_P2P_DEVICE: + arvif->vdev_type = WMI_VDEV_TYPE_STA; + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); + break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_STATION: arvif->vdev_type = WMI_VDEV_TYPE_STA; if (vif->p2p) - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); break; case NL80211_IFTYPE_ADHOC: arvif->vdev_type = WMI_VDEV_TYPE_IBSS; break; + case NL80211_IFTYPE_MESH_POINT: + if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_MESH_11S); + } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + ret = -EINVAL; + ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); + goto err; + } + arvif->vdev_type = WMI_VDEV_TYPE_AP; + break; case NL80211_IFTYPE_AP: arvif->vdev_type = WMI_VDEV_TYPE_AP; if (vif->p2p) - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_P2P_GO); break; case NL80211_IFTYPE_MONITOR: arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; @@ -1807,32 +5651,164 @@ static int ath10k_add_interface(struct ieee80211_hw 
*hw, break; } - ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n", - arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype); + /* Using vdev_id as queue number will make it very easy to do per-vif + * tx queue locking. This shouldn't wrap due to interface combinations + * but do a modulo for correctness sake and prevent using offchannel tx + * queues for regular vif tx. + */ + vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); + for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) + vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); + + /* Some firmware revisions don't wait for beacon tx completion before + * sending another SWBA event. This could lead to hardware using old + * (freed) beacon data in some cases, e.g. tx credit starvation + * combined with missed TBTT. This is very rare. + * + * On non-IOMMU-enabled hosts this could be a possible security issue + * because hw could beacon some random data on the air. On + * IOMMU-enabled hosts DMAR faults would occur in most cases and target + * device would crash. + * + * Since there are no beacon tx completions (implicit nor explicit) + * propagated to host the only workaround for this is to allocate a + * DMA-coherent buffer for a lifetime of a vif and use it for all + * beacon tx commands. Worst case for this approach is some beacons may + * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. + */ + if (vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT || + vif->type == NL80211_IFTYPE_AP) { + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) { + arvif->beacon_buf = kmalloc(IEEE80211_MAX_FRAME_LEN, + GFP_KERNEL); + + /* Using a kernel pointer in place of a dma_addr_t + * token can lead to undefined behavior if that + * makes it into cache management functions. Use a + * known-invalid address token instead, which + * avoids the warning and makes it easier to catch + * bugs if it does end up getting used. + */ + arvif->beacon_paddr = DMA_MAPPING_ERROR; + } else { + arvif->beacon_buf = + dma_alloc_coherent(ar->dev, + IEEE80211_MAX_FRAME_LEN, + &arvif->beacon_paddr, + GFP_ATOMIC); + } + if (!arvif->beacon_buf) { + ret = -ENOMEM; + ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", + ret); + goto err; + } + } + if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) + arvif->nohwcrypt = true; + + if (arvif->nohwcrypt && + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + ret = -EINVAL; + ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); + goto err; + } + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", + arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, + arvif->beacon_buf ? 
"single-buf" : "per-skb"); ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, vif->addr); if (ret) { - ath10k_warn("WMI vdev create failed: ret %d\n", ret); - goto exit; + ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", + arvif->vdev_id, ret); + goto err; } - ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID, - arvif->def_wep_key_index); - if (ret) - ath10k_warn("Failed to set default keyid: %d\n", ret); + if (test_bit(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, + ar->wmi.svc_map)) { + vdev_param = ar->wmi.vdev_param->disable_4addr_src_lrn; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + WMI_VDEV_DISABLE_4_ADDR_SRC_LRN); + if (ret && ret != -EOPNOTSUPP) { + ath10k_warn(ar, "failed to disable 4addr src lrn vdev %i: %d\n", + arvif->vdev_id, ret); + } + } - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_TX_ENCAP_TYPE, - ATH10K_HW_TXRX_NATIVE_WIFI); - if (ret) - ath10k_warn("Failed to set TX encap: %d\n", ret); + ar->free_vdev_map &= ~(1LL << arvif->vdev_id); + spin_lock_bh(&ar->data_lock); + list_add(&arvif->list, &ar->arvifs); + spin_unlock_bh(&ar->data_lock); + + /* It makes no sense to have firmware do keepalives. mac80211 already + * takes care of this with idle connection polling. + */ + ret = ath10k_mac_vif_disable_keepalive(arvif); + if (ret) { + ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", + arvif->vdev_id, ret); + goto err_vdev_delete; + } + + arvif->def_wep_key_idx = -1; + + ath10k_update_vif_offload(hw, vif); + + /* Configuring number of spatial stream for monitor interface is causing + * target assert in qca9888 and qca6174. + */ + if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { + u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); + + vdev_param = ar->wmi.vdev_param->nss; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + nss); + if (ret) { + ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", + arvif->vdev_id, ar->cfg_tx_chainmask, nss, + ret); + goto err_vdev_delete; + } + } + + if (arvif->vdev_type == WMI_VDEV_TYPE_AP || + arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { + ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, + vif->addr, WMI_PEER_TYPE_DEFAULT); + if (ret) { + ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", + arvif->vdev_id, ret); + goto err_vdev_delete; + } + + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); + if (!peer) { + ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", + vif->addr, arvif->vdev_id); + spin_unlock_bh(&ar->data_lock); + ret = -ENOENT; + goto err_peer_delete; + } + + arvif->peer_id = find_first_bit(peer->peer_ids, + ATH10K_MAX_NUM_PEER_IDS); + + spin_unlock_bh(&ar->data_lock); + } else { + arvif->peer_id = HTT_INVALID_PEERID; + } if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { - ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); + ret = ath10k_mac_set_kickout(arvif); if (ret) { - ath10k_warn("Failed to create peer for AP: %d\n", ret); - goto exit; + ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", + arvif->vdev_id, ret); + goto err_peer_delete; } } @@ -1841,62 +5817,222 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); - if (ret) - ath10k_warn("Failed to set RX wake policy: %d\n", ret); + if (ret) { + ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", + 
arvif->vdev_id, ret); + goto err_peer_delete; + } - param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; - value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; - ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, - param, value); - if (ret) - ath10k_warn("Failed to set TX wake thresh: %d\n", ret); + ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", + arvif->vdev_id, ret); + goto err_peer_delete; + } - param = WMI_STA_PS_PARAM_PSPOLL_COUNT; - value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; - ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, - param, value); - if (ret) - ath10k_warn("Failed to set PSPOLL count: %d\n", ret); + ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", + arvif->vdev_id, ret); + goto err_peer_delete; + } } - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) - ar->monitor_present = true; + ret = ath10k_mac_set_txbf_conf(arvif); + if (ret) { + ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", + arvif->vdev_id, ret); + goto err_peer_delete; + } + + ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); + if (ret) { + ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", + arvif->vdev_id, ret); + goto err_peer_delete; + } + + arvif->txpower = vif->bss_conf.txpower; + ret = ath10k_mac_txpower_recalc(ar); + if (ret) { + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + goto err_peer_delete; + } + + if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) { + vdev_param = ar->wmi.vdev_param->rtt_responder_role; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + arvif->ftm_responder); + + /* It is harmless to not set FTM role. Do not warn */ + if (ret && ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to set vdev %i FTM Responder: %d\n", + arvif->vdev_id, ret); + } + + if (vif->type == NL80211_IFTYPE_MONITOR) { + ar->monitor_arvif = arvif; + ret = ath10k_monitor_recalc(ar); + if (ret) { + ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); + goto err_peer_delete; + } + } + + spin_lock_bh(&ar->htt.tx_lock); + if (!ar->tx_paused) + ieee80211_wake_queue(ar->hw, arvif->vdev_id); + spin_unlock_bh(&ar->htt.tx_lock); -exit: mutex_unlock(&ar->conf_mutex); + return 0; + +err_peer_delete: + if (arvif->vdev_type == WMI_VDEV_TYPE_AP || + arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { + ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); + ath10k_wait_for_peer_delete_done(ar, arvif->vdev_id, + vif->addr); + } + +err_vdev_delete: + ath10k_wmi_vdev_delete(ar, arvif->vdev_id); + ar->free_vdev_map |= 1LL << arvif->vdev_id; + spin_lock_bh(&ar->data_lock); + list_del(&arvif->list); + spin_unlock_bh(&ar->data_lock); + +err: + if (arvif->beacon_buf) { + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) + kfree(arvif->beacon_buf); + else + dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, + arvif->beacon_buf, + arvif->beacon_paddr); + arvif->beacon_buf = NULL; + } + + mutex_unlock(&ar->conf_mutex); + return ret; } +static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) +{ + int i; + + for (i = 0; i < BITS_PER_LONG; i++) + ath10k_mac_vif_tx_unlock(arvif, i); +} + static void ath10k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_peer *peer; int ret; + int i; + + cancel_work_sync(&arvif->ap_csa_work); + 
cancel_delayed_work_sync(&arvif->connection_loss_work); mutex_lock(&ar->conf_mutex); - ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id); + ret = ath10k_spectral_vif_stop(arvif); + if (ret) + ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", + arvif->vdev_id, ret); - ar->free_vdev_map |= 1 << (arvif->vdev_id); + ar->free_vdev_map |= 1LL << arvif->vdev_id; + spin_lock_bh(&ar->data_lock); + list_del(&arvif->list); + spin_unlock_bh(&ar->data_lock); - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { - ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); + if (arvif->vdev_type == WMI_VDEV_TYPE_AP || + arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { + ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, + vif->addr); if (ret) - ath10k_warn("Failed to remove peer for AP: %d\n", ret); + ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", + arvif->vdev_id, ret); + ath10k_wait_for_peer_delete_done(ar, arvif->vdev_id, + vif->addr); kfree(arvif->u.ap.noa_data); } + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", + arvif->vdev_id); + ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); if (ret) - ath10k_warn("WMI vdev delete failed: %d\n", ret); + ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", + arvif->vdev_id, ret); + + ret = ath10k_vdev_delete_sync(ar); + if (ret) { + ath10k_warn(ar, "Error in receiving vdev delete response: %d\n", ret); + goto out; + } - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) - ar->monitor_present = false; + /* Some firmware revisions don't notify host about self-peer removal + * until after associated vdev is deleted. + */ + if (arvif->vdev_type == WMI_VDEV_TYPE_AP || + arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { + ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, + vif->addr); + if (ret) + ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", + arvif->vdev_id, ret); + + spin_lock_bh(&ar->data_lock); + ar->num_peers--; + spin_unlock_bh(&ar->data_lock); + } + + spin_lock_bh(&ar->data_lock); + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + peer = ar->peer_map[i]; + if (!peer) + continue; + + if (peer->vif == vif) { + ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", + vif->addr, arvif->vdev_id); + peer->vif = NULL; + } + } + + /* Clean this up late, less opportunity for firmware to access + * DMA memory we have deleted. + */ + ath10k_mac_vif_beacon_cleanup(arvif); + spin_unlock_bh(&ar->data_lock); ath10k_peer_cleanup(ar, arvif->vdev_id); + ath10k_mac_txq_unref(ar, vif->txq); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + ar->monitor_arvif = NULL; + ret = ath10k_monitor_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); + } + + ret = ath10k_mac_txpower_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + spin_lock_bh(&ar->htt.tx_lock); + ath10k_mac_vif_tx_unlock_all(arvif); + spin_unlock_bh(&ar->htt.tx_lock); + + ath10k_mac_txq_unref(ar, vif->txq); + +out: mutex_unlock(&ar->conf_mutex); } @@ -1904,8 +6040,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, * FIXME: Has to be verified. 
*/ #define SUPPORTED_FILTERS \ - (FIF_PROMISC_IN_BSS | \ - FIF_ALLMULTI | \ + (FIF_ALLMULTI | \ FIF_CONTROL | \ FIF_PSPOLL | \ FIF_OTHER_BSS | \ @@ -1920,267 +6055,450 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; int ret; + unsigned int supported = SUPPORTED_FILTERS; mutex_lock(&ar->conf_mutex); - changed_flags &= SUPPORTED_FILTERS; - *total_flags &= SUPPORTED_FILTERS; + if (ar->hw_params.mcast_frame_registration) + supported |= FIF_MCAST_ACTION; + + *total_flags &= supported; + ar->filter_flags = *total_flags; - if ((ar->filter_flags & FIF_PROMISC_IN_BSS) && - !ar->monitor_enabled) { - ret = ath10k_monitor_start(ar, ar->monitor_vdev_id); - if (ret) - ath10k_warn("Unable to start monitor mode\n"); - else - ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n"); - } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && - ar->monitor_enabled) { - ret = ath10k_monitor_stop(ar); - if (ret) - ath10k_warn("Unable to stop monitor mode\n"); - else - ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n"); - } + ret = ath10k_monitor_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); mutex_unlock(&ar->conf_mutex); } +static void ath10k_recalculate_mgmt_rate(struct ath10k *ar, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *def) +{ + struct ath10k_vif *arvif = (void *)vif->drv_priv; + const struct ieee80211_supported_band *sband; + u8 basic_rate_idx; + int hw_rate_code; + u32 vdev_param; + u16 bitrate; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + sband = ar->hw->wiphy->bands[def->chan->band]; + basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; + bitrate = sband->bitrates[basic_rate_idx].bitrate; + + hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate); + if (hw_rate_code < 0) { + ath10k_warn(ar, "bitrate not supported %d\n", bitrate); + return; + } + + vdev_param = ar->wmi.vdev_param->mgmt_rate; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + hw_rate_code); + if (ret) + ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret); +} + static void ath10k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, - u32 changed) + u64 changed) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - int ret = 0; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + u32 vdev_param, pdev_param, slottime, preamble; + u16 bitrate, hw_value; + u8 rate, rateidx; + int ret = 0, mcast_rate; + enum nl80211_band band; mutex_lock(&ar->conf_mutex); if (changed & BSS_CHANGED_IBSS) - ath10k_control_ibss(arvif, info, vif->addr); + ath10k_control_ibss(arvif, vif); if (changed & BSS_CHANGED_BEACON_INT) { arvif->beacon_interval = info->beacon_int; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_BEACON_INTERVAL, + vdev_param = ar->wmi.vdev_param->beacon_interval; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, arvif->beacon_interval); + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d beacon_interval %d\n", + arvif->vdev_id, arvif->beacon_interval); + if (ret) - ath10k_warn("Failed to set beacon interval for VDEV: %d\n", - arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Beacon interval: %d set for VDEV: %d\n", - arvif->beacon_interval, arvif->vdev_id); + ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", + arvif->vdev_id, ret); } if (changed & BSS_CHANGED_BEACON) { - ret = ath10k_wmi_pdev_set_param(ar, - WMI_PDEV_PARAM_BEACON_TX_MODE, + 
ath10k_dbg(ar, ATH10K_DBG_MAC, + "vdev %d set beacon tx mode to staggered\n", + arvif->vdev_id); + + pdev_param = ar->wmi.pdev_param->beacon_tx_mode; + ret = ath10k_wmi_pdev_set_param(ar, pdev_param, WMI_BEACON_STAGGERED_MODE); if (ret) - ath10k_warn("Failed to set beacon mode for VDEV: %d\n", - arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set staggered beacon mode for VDEV: %d\n", - arvif->vdev_id); + ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", + arvif->vdev_id, ret); + + ret = ath10k_mac_setup_bcn_tmpl(arvif); + if (ret) + ath10k_warn(ar, "failed to update beacon template: %d\n", + ret); + + if (ieee80211_vif_is_mesh(vif)) { + /* mesh doesn't use SSID but firmware needs it */ + arvif->u.ap.ssid_len = 4; + memcpy(arvif->u.ap.ssid, "mesh", arvif->u.ap.ssid_len); + } } - if (changed & BSS_CHANGED_BEACON_INFO) { + if (changed & BSS_CHANGED_AP_PROBE_RESP) { + ret = ath10k_mac_setup_prb_tmpl(arvif); + if (ret) + ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", + arvif->vdev_id, ret); + } + + if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { arvif->dtim_period = info->dtim_period; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_DTIM_PERIOD, + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d dtim_period %d\n", + arvif->vdev_id, arvif->dtim_period); + + vdev_param = ar->wmi.vdev_param->dtim_period; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, arvif->dtim_period); if (ret) - ath10k_warn("Failed to set dtim period for VDEV: %d\n", - arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set dtim period: %d for VDEV: %d\n", - arvif->dtim_period, arvif->vdev_id); + ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", + arvif->vdev_id, ret); } if (changed & BSS_CHANGED_SSID && vif->type == NL80211_IFTYPE_AP) { - arvif->u.ap.ssid_len = info->ssid_len; - if (info->ssid_len) - memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); + arvif->u.ap.ssid_len = vif->cfg.ssid_len; + if (vif->cfg.ssid_len) + memcpy(arvif->u.ap.ssid, vif->cfg.ssid, + vif->cfg.ssid_len); arvif->u.ap.hidden_ssid = info->hidden_ssid; } - if (changed & BSS_CHANGED_BSSID) { - if (!is_zero_ether_addr(info->bssid)) { - ret = ath10k_peer_create(ar, arvif->vdev_id, - info->bssid); - if (ret) - ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", - info->bssid, arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Added peer: %pM for VDEV: %d\n", - info->bssid, arvif->vdev_id); + if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) + ether_addr_copy(arvif->bssid, info->bssid); + if (changed & BSS_CHANGED_FTM_RESPONDER && + arvif->ftm_responder != info->ftm_responder && + test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) { + arvif->ftm_responder = info->ftm_responder; - if (vif->type == NL80211_IFTYPE_STATION) { - /* - * this is never erased as we it for crypto key - * clearing; this is FW requirement - */ - memcpy(arvif->u.sta.bssid, info->bssid, - ETH_ALEN); - - ret = ath10k_vdev_start(arvif); - if (!ret) - ath10k_dbg(ATH10K_DBG_MAC, - "VDEV: %d started with BSSID: %pM\n", - arvif->vdev_id, info->bssid); - } + vdev_param = ar->wmi.vdev_param->rtt_responder_role; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + arvif->ftm_responder); - /* - * Mac80211 does not keep IBSS bssid when leaving IBSS, - * so driver need to store it. It is needed when leaving - * IBSS in order to remove BSSID peer. 
- */ - if (vif->type == NL80211_IFTYPE_ADHOC) - memcpy(arvif->u.ibss.bssid, info->bssid, - ETH_ALEN); - } + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d ftm_responder %d: ret %d\n", + arvif->vdev_id, arvif->ftm_responder, ret); } if (changed & BSS_CHANGED_BEACON_ENABLED) ath10k_control_beaconing(arvif, info); if (changed & BSS_CHANGED_ERP_CTS_PROT) { - u32 cts_prot; - if (info->use_cts_prot) - cts_prot = 1; - else - cts_prot = 0; + arvif->use_cts_prot = info->use_cts_prot; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_ENABLE_RTSCTS, - cts_prot); + ret = ath10k_recalc_rtscts_prot(arvif); if (ret) - ath10k_warn("Failed to set CTS prot for VDEV: %d\n", - arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set CTS prot: %d for VDEV: %d\n", - cts_prot, arvif->vdev_id); + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", + arvif->vdev_id, ret); + + if (ath10k_mac_can_set_cts_prot(arvif)) { + ret = ath10k_mac_set_cts_prot(arvif); + if (ret) + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", + arvif->vdev_id, ret); + } } if (changed & BSS_CHANGED_ERP_SLOT) { - u32 slottime; if (info->use_short_slot) slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ else slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_SLOT_TIME, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", + arvif->vdev_id, slottime); + + vdev_param = ar->wmi.vdev_param->slot_time; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, slottime); if (ret) - ath10k_warn("Failed to set erp slot for VDEV: %d\n", - arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set slottime: %d for VDEV: %d\n", - slottime, arvif->vdev_id); + ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", + arvif->vdev_id, ret); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { - u32 preamble; if (info->use_short_preamble) preamble = WMI_VDEV_PREAMBLE_SHORT; else preamble = WMI_VDEV_PREAMBLE_LONG; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_PREAMBLE, + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d preamble %d\n", + arvif->vdev_id, preamble); + + vdev_param = ar->wmi.vdev_param->preamble; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, preamble); if (ret) - ath10k_warn("Failed to set preamble for VDEV: %d\n", - arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set preamble: %d for VDEV: %d\n", - preamble, arvif->vdev_id); + ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", + arvif->vdev_id, ret); } if (changed & BSS_CHANGED_ASSOC) { - if (info->assoc) + if (vif->cfg.assoc) { + /* Workaround: Make sure monitor vdev is not running + * when associating to prevent some firmware revisions + * (e.g. 10.1 and 10.2) from crashing.
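+ * + * (The ath10k_monitor_recalc() call after ath10k_bss_assoc() below then presumably restarts the monitor vdev once the association work has been handed to firmware; the crash itself is only known to affect the firmware revisions named above.)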
+ */ + if (ar->monitor_started) + ath10k_monitor_stop(ar); ath10k_bss_assoc(hw, vif, info); + ath10k_monitor_recalc(ar); + } else { + ath10k_bss_disassoc(hw, vif); + } + } + + if (changed & BSS_CHANGED_TXPOWER) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", + arvif->vdev_id, info->txpower); + + arvif->txpower = info->txpower; + ret = ath10k_mac_txpower_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); } + if (changed & BSS_CHANGED_PS) { + arvif->ps = vif->cfg.ps; + + ret = ath10k_config_ps(ar); + if (ret) + ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", + arvif->vdev_id, ret); + } + + if (changed & BSS_CHANGED_MCAST_RATE && + !ath10k_mac_vif_chan(arvif->vif, &def)) { + band = def.chan->band; + mcast_rate = vif->bss_conf.mcast_rate[band]; + if (mcast_rate > 0) + rateidx = mcast_rate - 1; + else + rateidx = ffs(vif->bss_conf.basic_rates) - 1; + + if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) + rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX; + + bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate; + hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value; + if (ath10k_mac_bitrate_is_cck(bitrate)) + preamble = WMI_RATE_PREAMBLE_CCK; + else + preamble = WMI_RATE_PREAMBLE_OFDM; + + rate = ATH10K_HW_RATECODE(hw_value, 0, preamble); + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d mcast_rate %x\n", + arvif->vdev_id, rate); + + vdev_param = ar->wmi.vdev_param->mcast_data_rate; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + vdev_param, rate); + if (ret) + ath10k_warn(ar, + "failed to set mcast rate on vdev %i: %d\n", + arvif->vdev_id, ret); + + vdev_param = ar->wmi.vdev_param->bcast_data_rate; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + vdev_param, rate); + if (ret) + ath10k_warn(ar, + "failed to set bcast rate on vdev %i: %d\n", + arvif->vdev_id, ret); + } + + if (changed & BSS_CHANGED_BASIC_RATES && + !ath10k_mac_vif_chan(arvif->vif, &def)) + ath10k_recalculate_mgmt_rate(ar, vif, &def); + mutex_unlock(&ar->conf_mutex); } +static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, int radio_idx, + s16 value) +{ + struct ath10k *ar = hw->priv; + + /* This function should never be called if setting the coverage class + * is not supported on this hardware. 
+ */ + if (!ar->hw_params.hw_ops->set_coverage_class) { + WARN_ON_ONCE(1); + return; + } + ar->hw_params.hw_ops->set_coverage_class(ar, -1, value); +} + +struct ath10k_mac_tdls_iter_data { + u32 num_tdls_stations; + struct ieee80211_vif *curr_vif; +}; + +static void ath10k_mac_tdls_vif_stations_count_iter(void *data, + struct ieee80211_sta *sta) +{ + struct ath10k_mac_tdls_iter_data *iter_data = data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ieee80211_vif *sta_vif = arsta->arvif->vif; + + if (sta->tdls && sta_vif == iter_data->curr_vif) + iter_data->num_tdls_stations++; +} + +static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath10k_mac_tdls_iter_data data = {}; + + data.curr_vif = vif; + + ieee80211_iterate_stations_atomic(hw, + ath10k_mac_tdls_vif_stations_count_iter, + &data); + return data.num_tdls_stations; +} + static int ath10k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct cfg80211_scan_request *req) + struct ieee80211_scan_request *hw_req) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - struct wmi_start_scan_arg arg; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_scan_request *req = &hw_req->req; + struct wmi_start_scan_arg *arg = NULL; int ret = 0; int i; + u32 scan_timeout; mutex_lock(&ar->conf_mutex); - spin_lock_bh(&ar->data_lock); - if (ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); + if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { ret = -EBUSY; goto exit; } - INIT_COMPLETION(ar->scan.started); - INIT_COMPLETION(ar->scan.completed); - ar->scan.in_progress = true; - ar->scan.aborting = false; - ar->scan.is_roc = false; - ar->scan.vdev_id = arvif->vdev_id; + spin_lock_bh(&ar->data_lock); + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + reinit_completion(&ar->scan.started); + reinit_completion(&ar->scan.completed); + ar->scan.state = ATH10K_SCAN_STARTING; + ar->scan.is_roc = false; + ar->scan.vdev_id = arvif->vdev_id; + ret = 0; + break; + case ATH10K_SCAN_STARTING: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ret = -EBUSY; + break; + } spin_unlock_bh(&ar->data_lock); - memset(&arg, 0, sizeof(arg)); - ath10k_wmi_start_scan_init(ar, &arg); - arg.vdev_id = arvif->vdev_id; - arg.scan_id = ATH10K_SCAN_ID; + if (ret) + goto exit; - if (!req->no_cck) - arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES; + arg = kzalloc(sizeof(*arg), GFP_KERNEL); + if (!arg) { + ret = -ENOMEM; + goto exit; + } + + ath10k_wmi_start_scan_init(ar, arg); + arg->vdev_id = arvif->vdev_id; + arg->scan_id = ATH10K_SCAN_ID; if (req->ie_len) { - arg.ie_len = req->ie_len; - memcpy(arg.ie, req->ie, arg.ie_len); + arg->ie_len = req->ie_len; + memcpy(arg->ie, req->ie, arg->ie_len); } if (req->n_ssids) { - arg.n_ssids = req->n_ssids; - for (i = 0; i < arg.n_ssids; i++) { - arg.ssids[i].len = req->ssids[i].ssid_len; - arg.ssids[i].ssid = req->ssids[i].ssid; + arg->n_ssids = req->n_ssids; + for (i = 0; i < arg->n_ssids; i++) { + arg->ssids[i].len = req->ssids[i].ssid_len; + arg->ssids[i].ssid = req->ssids[i].ssid; } + } else { + arg->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; + } + + if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + arg->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; + ether_addr_copy(arg->mac_addr.addr, req->mac_addr); + ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask); } if (req->n_channels) { - arg.n_channels = req->n_channels; - for (i = 0; i < arg.n_channels; i++) - 
arg.channels[i] = req->channels[i]->center_freq; + arg->n_channels = req->n_channels; + for (i = 0; i < arg->n_channels; i++) + arg->channels[i] = req->channels[i]->center_freq; + } + + /* if duration is set, default dwell times will be overwritten */ + if (req->duration) { + arg->dwell_time_active = req->duration; + arg->dwell_time_passive = req->duration; + arg->burst_duration_ms = req->duration; + + scan_timeout = min_t(u32, arg->max_rest_time * + (arg->n_channels - 1) + (req->duration + + ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * + arg->n_channels, arg->max_scan_time); + } else { + scan_timeout = arg->max_scan_time; } - ret = ath10k_start_scan(ar, &arg); + /* Add a 200ms margin to account for event/command processing */ + scan_timeout += 200; + + ret = ath10k_start_scan(ar, arg); if (ret) { - ath10k_warn("could not start hw scan (%d)\n", ret); + ath10k_warn(ar, "failed to start hw scan: %d\n", ret); spin_lock_bh(&ar->data_lock); - ar->scan.in_progress = false; + ar->scan.state = ATH10K_SCAN_IDLE; spin_unlock_bh(&ar->data_lock); } + ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, + msecs_to_jiffies(scan_timeout)); + exit: + kfree(arg); + mutex_unlock(&ar->conf_mutex); return ret; } @@ -2189,16 +6507,54 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - int ret; mutex_lock(&ar->conf_mutex); - ret = ath10k_abort_scan(ar); - if (ret) { - ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n", - ret); - ieee80211_scan_completed(hw, 1 /* aborted */); - } + ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); + + cancel_delayed_work_sync(&ar->scan.timeout); +} + +static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, + struct ath10k_vif *arvif, + enum set_key_cmd cmd, + struct ieee80211_key_conf *key) +{ + u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; + int ret; + + /* 10.1 firmware branch requires default key index to be set to group + * key index after installing it. Otherwise FW/HW Txes corrupted + * frames with multi-vif APs. This is not required for main firmware + * branch (e.g. 636). + * + * This is also needed for 636 fw for IBSS-RSN to work more reliably. + * + * FIXME: It remains unknown if this is required for multi-vif STA + * interfaces on 10.1. 
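+ * + * A minimal sketch, assuming only helpers defined elsewhere in this patch: for such firmware, installing a group key and then re-pointing the default key index at it amounts to + * + * ath10k_install_key(arvif, key, SET_KEY, peer_addr, WMI_KEY_GROUP); + * ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + * ar->wmi.vdev_param->def_keyid, + * key->keyidx); + * + * where the second call is what the body below performs on behalf of ath10k_set_key().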
+ */ + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_IBSS) + return; + + if (key->cipher == WLAN_CIPHER_SUITE_WEP40) + return; + + if (key->cipher == WLAN_CIPHER_SUITE_WEP104) + return; + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + return; + + if (cmd != SET_KEY) + return; + + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + key->keyidx); + if (ret) + ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", + arvif->vdev_id, ret); } static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, @@ -2206,62 +6562,133 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_key_conf *key) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_sta *arsta; struct ath10k_peer *peer; const u8 *peer_addr; bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || key->cipher == WLAN_CIPHER_SUITE_WEP104; int ret = 0; + int ret2; + u32 flags = 0; + u32 flags2; + + /* this one needs to be done in software */ + if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || + key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) + return 1; + + if (arvif->nohwcrypt) + return 1; if (key->keyidx > WMI_MAX_KEY_INDEX) return -ENOSPC; mutex_lock(&ar->conf_mutex); - if (sta) + if (sta) { + arsta = (struct ath10k_sta *)sta->drv_priv; peer_addr = sta->addr; - else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) + spin_lock_bh(&ar->data_lock); + arsta->ucast_cipher = key->cipher; + spin_unlock_bh(&ar->data_lock); + } else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { peer_addr = vif->bss_conf.bssid; - else + } else { peer_addr = vif->addr; + } key->hw_key_idx = key->keyidx; + if (is_wep) { + if (cmd == SET_KEY) + arvif->wep_keys[key->keyidx] = key; + else + arvif->wep_keys[key->keyidx] = NULL; + } + /* the peer should not disappear in mid-way (unless FW goes awry) since - * we already hold conf_mutex. we just make sure its there now. */ + * we already hold conf_mutex. we just make sure it's there now. + */ spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); spin_unlock_bh(&ar->data_lock); if (!peer) { if (cmd == SET_KEY) { - ath10k_warn("cannot install key for non-existent peer %pM\n", + ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", peer_addr); ret = -EOPNOTSUPP; goto exit; } else { - /* if the peer doesn't exist there is no key to disable - * anymore */ + /* if the peer doesn't exist there is no key to disable anymore */ goto exit; } } - if (is_wep) { - if (cmd == SET_KEY) - arvif->wep_keys[key->keyidx] = key; - else - arvif->wep_keys[key->keyidx] = NULL; + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + flags |= WMI_KEY_PAIRWISE; + else + flags |= WMI_KEY_GROUP; + if (is_wep) { if (cmd == DISABLE_KEY) ath10k_clear_vdev_key(arvif, key); + + /* When WEP keys are uploaded it's possible that there are + * stations associated already (e.g. when merging) without any + * keys. Static WEP needs an explicit per-peer key upload. + */ + if (vif->type == NL80211_IFTYPE_ADHOC && + cmd == SET_KEY) + ath10k_mac_vif_update_wep_key(arvif, key); + + /* 802.1x never sets the def_wep_key_idx so each set_key() + * call changes default tx key. + * + * Static WEP sets def_wep_key_idx via .set_default_unicast_key + * after first set_key().
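+ * + * (As far as can be inferred from this patch alone, the WMI_KEY_TX_USAGE flag set just below marks the freshly installed key as the transmit key, which only matters while def_wep_key_idx is still -1, i.e. before either of the two paths above has picked a default.)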
+ */ + if (cmd == SET_KEY && arvif->def_wep_key_idx == -1) + flags |= WMI_KEY_TX_USAGE; } - ret = ath10k_install_key(arvif, key, cmd, peer_addr); + ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); if (ret) { - ath10k_warn("ath10k_install_key failed (%d)\n", ret); + WARN_ON(ret > 0); + ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", + arvif->vdev_id, peer_addr, ret); goto exit; } + /* mac80211 sets static WEP keys as groupwise while firmware requires + * them to be installed twice as both pairwise and groupwise. + */ + if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { + flags2 = flags; + flags2 &= ~WMI_KEY_GROUP; + flags2 |= WMI_KEY_PAIRWISE; + + ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); + if (ret) { + WARN_ON(ret > 0); + ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", + arvif->vdev_id, peer_addr, ret); + ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, + peer_addr, flags); + if (ret2) { + WARN_ON(ret2 > 0); + ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", + arvif->vdev_id, peer_addr, ret2); + } + goto exit; + } + } + + ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); + spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); if (peer && cmd == SET_KEY) @@ -2270,14 +6697,799 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, peer->keys[key->keyidx] = NULL; else if (peer == NULL) /* impossible unless FW goes crazy */ - ath10k_warn("peer %pM disappeared!\n", peer_addr); + ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); spin_unlock_bh(&ar->data_lock); + if (sta && sta->tdls) + ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + ar->wmi.peer_param->authorize, 1); + else if (sta && cmd == SET_KEY && (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + ath10k_wmi_peer_set_param(ar, arvif->vdev_id, peer_addr, + ar->wmi.peer_param->authorize, 1); + exit: mutex_unlock(&ar->conf_mutex); return ret; } +static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + int keyidx) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + int ret; + + mutex_lock(&arvif->ar->conf_mutex); + + if (arvif->ar->state != ATH10K_STATE_ON) + goto unlock; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", + arvif->vdev_id, keyidx); + + ret = ath10k_wmi_vdev_set_param(arvif->ar, + arvif->vdev_id, + arvif->ar->wmi.vdev_param->def_keyid, + keyidx); + + if (ret) { + ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", + arvif->vdev_id, + ret); + goto unlock; + } + + arvif->def_wep_key_idx = keyidx; + +unlock: + mutex_unlock(&arvif->ar->conf_mutex); +} + +static void ath10k_sta_rc_update_wk(struct work_struct *wk) +{ + struct ath10k *ar; + struct ath10k_vif *arvif; + struct ath10k_sta *arsta; + struct ieee80211_sta *sta; + struct cfg80211_chan_def def; + enum nl80211_band band; + const u8 *ht_mcs_mask; + const u16 *vht_mcs_mask; + u32 changed, bw, nss, smps; + int err; + + arsta = container_of(wk, struct ath10k_sta, update_wk); + sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); + arvif = arsta->arvif; + ar = arvif->ar; + + if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) + return; + + band = def.chan->band; + ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; + vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + + spin_lock_bh(&ar->data_lock); + + changed = arsta->changed; + arsta->changed = 0; + + bw = 
arsta->bw; + nss = arsta->nss; + smps = arsta->smps; + + spin_unlock_bh(&ar->data_lock); + + mutex_lock(&ar->conf_mutex); + + nss = max_t(u32, 1, nss); + nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), + ath10k_mac_max_vht_nss(vht_mcs_mask))); + + if (changed & IEEE80211_RC_BW_CHANGED) { + enum wmi_phy_mode mode; + + mode = chan_to_phymode(&def); + ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM peer bw %d phymode %d\n", + sta->addr, bw, mode); + + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + ar->wmi.peer_param->phymode, mode); + if (err) { + ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n", + sta->addr, mode, err); + goto exit; + } + + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + ar->wmi.peer_param->chan_width, bw); + if (err) + ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", + sta->addr, bw, err); + } + + if (changed & IEEE80211_RC_NSS_CHANGED) { + ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM nss %d\n", + sta->addr, nss); + + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + ar->wmi.peer_param->nss, nss); + if (err) + ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", + sta->addr, nss, err); + } + + if (changed & IEEE80211_RC_SMPS_CHANGED) { + ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM smps %d\n", + sta->addr, smps); + + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + ar->wmi.peer_param->smps_state, smps); + if (err) + ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", + sta->addr, smps, err); + } + + if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { + ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM supp rates\n", + sta->addr); + + err = ath10k_station_assoc(ar, arvif->vif, sta, true); + if (err) + ath10k_warn(ar, "failed to reassociate station: %pM\n", + sta->addr); + } + +exit: + mutex_unlock(&ar->conf_mutex); +} + +static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, + struct ieee80211_sta *sta) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) + return 0; + + if (ar->num_stations >= ar->max_num_stations) + return -ENOBUFS; + + ar->num_stations++; + + return 0; +} + +static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, + struct ieee80211_sta *sta) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) + return; + + ar->num_stations--; +} + +static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + int ret = 0; + s16 txpwr; + + if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { + txpwr = 0; + } else { + txpwr = sta->deflink.txpwr.power; + if (!txpwr) + return -EINVAL; + } + + if (txpwr > ATH10K_TX_POWER_MAX_VAL || txpwr < ATH10K_TX_POWER_MIN_VAL) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + ar->wmi.peer_param->use_fixed_power, txpwr); + if (ret) { + ath10k_warn(ar, "failed to set tx power for station ret: %d\n", + ret); + goto out; + } + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +struct ath10k_mac_iter_tid_conf_data { + struct ieee80211_vif *curr_vif; + struct ath10k *ar; + bool reset_config; +}; + +static bool +ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, + enum nl80211_band band, + const struct 
cfg80211_bitrate_mask *mask, + int *vht_num_rates) +{ + int num_rates = 0; + int i, tmp; + + num_rates += hweight32(mask->control[band].legacy); + + for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) + num_rates += hweight8(mask->control[band].ht_mcs[i]); + + *vht_num_rates = 0; + for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { + tmp = hweight16(mask->control[band].vht_mcs[i]); + num_rates += tmp; + *vht_num_rates += tmp; + } + + return num_rates == 1; +} + +static int +ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask, + u8 *rate, u8 *nss, bool vht_only) +{ + int rate_idx; + int i; + u16 bitrate; + u8 preamble; + u8 hw_rate; + + if (vht_only) + goto next; + + if (hweight32(mask->control[band].legacy) == 1) { + rate_idx = ffs(mask->control[band].legacy) - 1; + + if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) + rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX; + + hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value; + bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate; + + if (ath10k_mac_bitrate_is_cck(bitrate)) + preamble = WMI_RATE_PREAMBLE_CCK; + else + preamble = WMI_RATE_PREAMBLE_OFDM; + + *nss = 1; + *rate = preamble << 6 | + (*nss - 1) << 4 | + hw_rate << 0; + + return 0; + } + + for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { + if (hweight8(mask->control[band].ht_mcs[i]) == 1) { + *nss = i + 1; + *rate = WMI_RATE_PREAMBLE_HT << 6 | + (*nss - 1) << 4 | + (ffs(mask->control[band].ht_mcs[i]) - 1); + + return 0; + } + } + +next: + for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { + if (hweight16(mask->control[band].vht_mcs[i]) == 1) { + *nss = i + 1; + *rate = WMI_RATE_PREAMBLE_VHT << 6 | + (*nss - 1) << 4 | + (ffs(mask->control[band].vht_mcs[i]) - 1); + + return 0; + } + } + + return -EINVAL; +} + +static int ath10k_mac_validate_rate_mask(struct ath10k *ar, + struct ieee80211_sta *sta, + u32 rate_ctrl_flag, u8 nss) +{ + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + + if (nss > sta->deflink.rx_nss) { + ath10k_warn(ar, "Invalid nss field, configured %u limit %u\n", + nss, sta->deflink.rx_nss); + return -EINVAL; + } + + if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) { + if (!vht_cap->vht_supported) { + ath10k_warn(ar, "Invalid VHT rate for sta %pM\n", + sta->addr); + return -EINVAL; + } + } else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) { + if (!ht_cap->ht_supported || vht_cap->vht_supported) { + ath10k_warn(ar, "Invalid HT rate for sta %pM\n", + sta->addr); + return -EINVAL; + } + } else { + if (ht_cap->ht_supported || vht_cap->vht_supported) + return -EINVAL; + } + + return 0; +} + +static int +ath10k_mac_tid_bitrate_config(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 *rate_ctrl_flag, u8 *rate_ctrl, + enum nl80211_tx_rate_setting txrate_type, + const struct cfg80211_bitrate_mask *mask) +{ + struct cfg80211_chan_def def; + enum nl80211_band band; + u8 nss, rate; + int vht_num_rates, ret; + + if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) + return -EINVAL; + + if (txrate_type == NL80211_TX_RATE_AUTOMATIC) { + *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO; + *rate_ctrl_flag = 0; + return 0; + } + + band = def.chan->band; + + if (!ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask, + &vht_num_rates)) { + return -EINVAL; + } + + ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, + &rate, 
&nss, false); + if (ret) { + ath10k_warn(ar, "failed to get single rate: %d\n", + ret); + return ret; + } + + *rate_ctrl_flag = rate; + + if (sta && ath10k_mac_validate_rate_mask(ar, sta, *rate_ctrl_flag, nss)) + return -EINVAL; + + if (txrate_type == NL80211_TX_RATE_FIXED) + *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_FIXED_RATE; + else if (txrate_type == NL80211_TX_RATE_LIMITED && + (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, + ar->wmi.svc_map))) + *rate_ctrl = WMI_PEER_TID_CONFIG_RATE_UPPER_CAP; + else + return -EOPNOTSUPP; + + return 0; +} + +static int ath10k_mac_set_tid_config(struct ath10k *ar, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, u32 changed, + struct wmi_per_peer_per_tid_cfg_arg *arg) +{ + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_sta *arsta; + int ret; + + if (sta) { + if (!sta->wme) + return -EOPNOTSUPP; + + arsta = (struct ath10k_sta *)sta->drv_priv; + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) { + if ((arsta->retry_long[arg->tid] > 0 || + arsta->rate_code[arg->tid] > 0 || + arsta->ampdu[arg->tid] == + WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) && + arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) { + changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK); + arg->ack_policy = 0; + arg->aggr_control = 0; + arg->rate_ctrl = 0; + arg->rcode_flags = 0; + } + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) { + if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK || + arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) { + arg->aggr_control = 0; + changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG); + } + } + + if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) | + BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) { + if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK || + arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) { + arg->rate_ctrl = 0; + arg->rcode_flags = 0; + } + } + + ether_addr_copy(arg->peer_macaddr.addr, sta->addr); + + ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, arg); + if (ret) + return ret; + + /* Store the configured parameters in success case */ + if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) { + arsta->noack[arg->tid] = arg->ack_policy; + arg->ack_policy = 0; + arg->aggr_control = 0; + arg->rate_ctrl = 0; + arg->rcode_flags = 0; + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) { + arsta->retry_long[arg->tid] = arg->retry_count; + arg->retry_count = 0; + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) { + arsta->ampdu[arg->tid] = arg->aggr_control; + arg->aggr_control = 0; + } + + if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) | + BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) { + arsta->rate_ctrl[arg->tid] = arg->rate_ctrl; + arg->rate_ctrl = 0; + arg->rcode_flags = 0; + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) { + arsta->rtscts[arg->tid] = arg->rtscts_ctrl; + arg->ext_tid_cfg_bitmap = 0; + } + } else { + if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) { + if ((arvif->retry_long[arg->tid] || + arvif->rate_code[arg->tid] || + arvif->ampdu[arg->tid] == + WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) && + arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) { + changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK); + } else { + arvif->noack[arg->tid] = arg->ack_policy; + arvif->ampdu[arg->tid] = arg->aggr_control; + arvif->rate_ctrl[arg->tid] = arg->rate_ctrl; + } + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) { + if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) + changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG); + else + 
arvif->retry_long[arg->tid] = arg->retry_count; + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) { + if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) + changed &= ~BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL); + else + arvif->ampdu[arg->tid] = arg->aggr_control; + } + + if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) | + BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) { + if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) { + changed &= ~(BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) | + BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE)); + } else { + arvif->rate_ctrl[arg->tid] = arg->rate_ctrl; + arvif->rate_code[arg->tid] = arg->rcode_flags; + } + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) { + arvif->rtscts[arg->tid] = arg->rtscts_ctrl; + arg->ext_tid_cfg_bitmap = 0; + } + + if (changed) + arvif->tid_conf_changed[arg->tid] |= changed; + } + + return 0; +} + +static int +ath10k_mac_parse_tid_config(struct ath10k *ar, + struct ieee80211_sta *sta, + struct ieee80211_vif *vif, + struct cfg80211_tid_cfg *tid_conf, + struct wmi_per_peer_per_tid_cfg_arg *arg) +{ + u32 changed = tid_conf->mask; + int ret = 0, i = 0; + + if (!changed) + return -EINVAL; + + while (i < ATH10K_TID_MAX) { + if (!(tid_conf->tids & BIT(i))) { + i++; + continue; + } + + arg->tid = i; + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) { + if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE) { + arg->ack_policy = WMI_PEER_TID_CONFIG_NOACK; + arg->rate_ctrl = + WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE; + arg->aggr_control = + WMI_TID_CONFIG_AGGR_CONTROL_DISABLE; + } else { + arg->ack_policy = + WMI_PEER_TID_CONFIG_ACK; + arg->rate_ctrl = + WMI_TID_CONFIG_RATE_CONTROL_AUTO; + arg->aggr_control = + WMI_TID_CONFIG_AGGR_CONTROL_ENABLE; + } + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) + arg->retry_count = tid_conf->retry_long; + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) { + if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE) + arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE; + else + arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE; + } + + if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) | + BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) { + ret = ath10k_mac_tid_bitrate_config(ar, vif, sta, + &arg->rcode_flags, + &arg->rate_ctrl, + tid_conf->txrate_type, + &tid_conf->txrate_mask); + if (ret) { + ath10k_warn(ar, "failed to configure bitrate mask %d\n", + ret); + arg->rcode_flags = 0; + arg->rate_ctrl = 0; + } + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) { + if (tid_conf->rtscts) + arg->rtscts_ctrl = tid_conf->rtscts; + + arg->ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG; + } + + ret = ath10k_mac_set_tid_config(ar, sta, vif, changed, arg); + if (ret) + return ret; + i++; + } + + return ret; +} + +static int ath10k_mac_reset_tid_config(struct ath10k *ar, + struct ieee80211_sta *sta, + struct ath10k_vif *arvif, + u8 tids) +{ + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct wmi_per_peer_per_tid_cfg_arg arg; + int ret = 0, i = 0; + + arg.vdev_id = arvif->vdev_id; + while (i < ATH10K_TID_MAX) { + if (!(tids & BIT(i))) { + i++; + continue; + } + + arg.tid = i; + arg.ack_policy = WMI_PEER_TID_CONFIG_ACK; + arg.retry_count = ATH10K_MAX_RETRY_COUNT; + arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO; + arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE; + arg.rtscts_ctrl = WMI_TID_CONFIG_RTSCTS_CONTROL_ENABLE; + arg.ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG; + + ether_addr_copy(arg.peer_macaddr.addr, 
sta->addr); + + ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg); + if (ret) + return ret; + + if (!arvif->tids_rst) { + arsta->retry_long[i] = -1; + arsta->noack[i] = -1; + arsta->ampdu[i] = -1; + arsta->rate_code[i] = -1; + arsta->rate_ctrl[i] = 0; + arsta->rtscts[i] = -1; + } else { + arvif->retry_long[i] = 0; + arvif->noack[i] = 0; + arvif->ampdu[i] = 0; + arvif->rate_code[i] = 0; + arvif->rate_ctrl[i] = 0; + arvif->rtscts[i] = 0; + } + + i++; + } + + return ret; +} + +static void ath10k_sta_tid_cfg_wk(struct work_struct *wk) +{ + struct wmi_per_peer_per_tid_cfg_arg arg = {}; + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + struct ath10k_vif *arvif; + struct ath10k *ar; + bool config_apply; + int ret, i; + u32 changed; + u8 nss; + + arsta = container_of(wk, struct ath10k_sta, tid_config_wk); + sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); + arvif = arsta->arvif; + ar = arvif->ar; + + mutex_lock(&ar->conf_mutex); + + if (arvif->tids_rst) { + ret = ath10k_mac_reset_tid_config(ar, sta, arvif, + arvif->tids_rst); + goto exit; + } + + ether_addr_copy(arg.peer_macaddr.addr, sta->addr); + + for (i = 0; i < ATH10K_TID_MAX; i++) { + config_apply = false; + changed = arvif->tid_conf_changed[i]; + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) { + if (arsta->noack[i] != -1) { + arg.ack_policy = 0; + } else { + config_apply = true; + arg.ack_policy = arvif->noack[i]; + arg.aggr_control = arvif->ampdu[i]; + arg.rate_ctrl = arvif->rate_ctrl[i]; + } + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) { + if (arsta->retry_long[i] != -1 || + arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK || + arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) { + arg.retry_count = 0; + } else { + arg.retry_count = arvif->retry_long[i]; + config_apply = true; + } + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) { + if (arsta->ampdu[i] != -1 || + arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK || + arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) { + arg.aggr_control = 0; + } else { + arg.aggr_control = arvif->ampdu[i]; + config_apply = true; + } + } + + if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) | + BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) { + nss = ATH10K_HW_NSS(arvif->rate_code[i]); + ret = ath10k_mac_validate_rate_mask(ar, sta, + arvif->rate_code[i], + nss); + if (ret && + arvif->rate_ctrl[i] > WMI_TID_CONFIG_RATE_CONTROL_AUTO) { + arg.rate_ctrl = 0; + arg.rcode_flags = 0; + } + + if (arsta->rate_ctrl[i] > + WMI_TID_CONFIG_RATE_CONTROL_AUTO || + arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK || + arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) { + arg.rate_ctrl = 0; + arg.rcode_flags = 0; + } else { + arg.rate_ctrl = arvif->rate_ctrl[i]; + arg.rcode_flags = arvif->rate_code[i]; + config_apply = true; + } + } + + if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) { + if (arsta->rtscts[i]) { + arg.rtscts_ctrl = 0; + arg.ext_tid_cfg_bitmap = 0; + } else { + arg.rtscts_ctrl = arvif->rtscts[i] - 1; + arg.ext_tid_cfg_bitmap = + WMI_EXT_TID_RTS_CTS_CONFIG; + config_apply = true; + } + } + + arg.tid = i; + + if (config_apply) { + ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg); + if (ret) + ath10k_warn(ar, "failed to set per tid config for sta %pM: %d\n", + sta->addr, ret); + } + + arg.ack_policy = 0; + arg.retry_count = 0; + arg.aggr_control = 0; + arg.rate_ctrl = 0; + arg.rcode_flags = 0; + } + +exit: + mutex_unlock(&ar->conf_mutex); +} + +static void ath10k_mac_vif_stations_tid_conf(void *data, + struct ieee80211_sta *sta) +{ + struct ath10k_sta *arsta = 
(struct ath10k_sta *)sta->drv_priv; + struct ath10k_mac_iter_tid_conf_data *iter_data = data; + struct ieee80211_vif *sta_vif = arsta->arvif->vif; + + if (sta_vif != iter_data->curr_vif || !sta->wme) + return; + + ieee80211_queue_work(iter_data->ar->hw, &arsta->tid_config_wk); +} + static int ath10k_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2285,84 +7497,270 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, enum ieee80211_sta_state new_state) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k_peer *peer; int ret = 0; + int i; + + if (old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) { + memset(arsta, 0, sizeof(*arsta)); + arsta->arvif = arvif; + arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; + INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); + INIT_WORK(&arsta->tid_config_wk, ath10k_sta_tid_cfg_wk); + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + ath10k_mac_txq_init(sta->txq[i]); + } + + /* cancel must be done outside the mutex to avoid deadlock */ + if ((old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST)) { + cancel_work_sync(&arsta->update_wk); + cancel_work_sync(&arsta->tid_config_wk); + } mutex_lock(&ar->conf_mutex); if (old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE && - vif->type != NL80211_IFTYPE_STATION) { + new_state == IEEE80211_STA_NONE) { /* * New station addition. */ - ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); - if (ret) - ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Added peer: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); + enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; + u32 num_tdls_stations; + + ath10k_dbg(ar, ATH10K_DBG_STA, + "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", + arvif->vdev_id, sta->addr, + ar->num_stations + 1, ar->max_num_stations, + ar->num_peers + 1, ar->max_num_peers); + + num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); + + if (sta->tdls) { + if (num_tdls_stations >= ar->max_num_tdls_vdevs) { + ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n", + arvif->vdev_id, + ar->max_num_tdls_vdevs); + ret = -ELNRNG; + goto exit; + } + peer_type = WMI_PEER_TYPE_TDLS; + } + + ret = ath10k_mac_inc_num_stations(arvif, sta); + if (ret) { + ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", + ar->max_num_stations); + goto exit; + } + + if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { + arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), + GFP_KERNEL); + if (!arsta->tx_stats) { + ath10k_mac_dec_num_stations(arvif, sta); + ret = -ENOMEM; + goto exit; + } + } + + ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, + sta->addr, peer_type); + if (ret) { + ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", + sta->addr, arvif->vdev_id, ret); + ath10k_mac_dec_num_stations(arvif, sta); + kfree(arsta->tx_stats); + goto exit; + } + + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); + if (!peer) { + ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", + vif->addr, arvif->vdev_id); + spin_unlock_bh(&ar->data_lock); + ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); + ath10k_mac_dec_num_stations(arvif, sta); 
+ kfree(arsta->tx_stats); + ret = -ENOENT; + goto exit; + } + + arsta->peer_id = find_first_bit(peer->peer_ids, + ATH10K_MAX_NUM_PEER_IDS); + + spin_unlock_bh(&ar->data_lock); + + if (!sta->tdls) + goto exit; + + ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, + WMI_TDLS_ENABLE_ACTIVE); + if (ret) { + ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", + arvif->vdev_id, ret); + ath10k_peer_delete(ar, arvif->vdev_id, + sta->addr); + ath10k_mac_dec_num_stations(arvif, sta); + kfree(arsta->tx_stats); + goto exit; + } + + ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, + WMI_TDLS_PEER_STATE_PEERING); + if (ret) { + ath10k_warn(ar, + "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n", + sta->addr, arvif->vdev_id, ret); + ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); + ath10k_mac_dec_num_stations(arvif, sta); + kfree(arsta->tx_stats); + + if (num_tdls_stations != 0) + goto exit; + ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, + WMI_TDLS_DISABLE); + } } else if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { /* * Existing station deletion. */ + ath10k_dbg(ar, ATH10K_DBG_STA, + "mac vdev %d peer delete %pM sta %p (sta gone)\n", + arvif->vdev_id, sta->addr, sta); + + if (sta->tdls) { + ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, + sta, + WMI_TDLS_PEER_STATE_TEARDOWN); + if (ret) + ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", + sta->addr, + WMI_TDLS_PEER_STATE_TEARDOWN, ret); + } + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) - ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Removed peer: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); + ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", + sta->addr, arvif->vdev_id, ret); - if (vif->type == NL80211_IFTYPE_STATION) - ath10k_bss_disassoc(hw, vif); + ath10k_mac_dec_num_stations(arvif, sta); + + spin_lock_bh(&ar->data_lock); + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + peer = ar->peer_map[i]; + if (!peer) + continue; + + if (peer->sta == sta) { + ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n", + sta->addr, peer, i, arvif->vdev_id); + peer->sta = NULL; + + /* Clean up the peer object as well since we + * must have failed to do this above. + */ + ath10k_peer_map_cleanup(ar, peer); + } + } + spin_unlock_bh(&ar->data_lock); + + if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { + kfree(arsta->tx_stats); + arsta->tx_stats = NULL; + } + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + ath10k_mac_txq_unref(ar, sta->txq[i]); + + if (!sta->tdls) + goto exit; + + if (ath10k_mac_tdls_vif_stations_count(hw, vif)) + goto exit; + + /* This was the last tdls peer in current vif */ + ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, + WMI_TDLS_DISABLE); + if (ret) { + ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", + arvif->vdev_id, ret); + } } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC && (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_ADHOC)) { /* * New association. 
*/ - ret = ath10k_station_assoc(ar, arvif, sta); + ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM associated\n", + sta->addr); + + ret = ath10k_station_assoc(ar, vif, sta, false); if (ret) - ath10k_warn("Failed to associate station: %pM\n", - sta->addr); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Station %pM moved to assoc state\n", - sta->addr); + ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", + sta->addr, arvif->vdev_id, ret); } else if (old_state == IEEE80211_STA_ASSOC && - new_state == IEEE80211_STA_AUTH && - (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC)) { + new_state == IEEE80211_STA_AUTHORIZED && + sta->tdls) { + /* + * Tdls station authorized. + */ + ath10k_dbg(ar, ATH10K_DBG_STA, "mac tdls sta %pM authorized\n", + sta->addr); + + ret = ath10k_station_assoc(ar, vif, sta, false); + if (ret) { + ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", + sta->addr, arvif->vdev_id, ret); + goto exit; + } + + ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, + WMI_TDLS_PEER_STATE_CONNECTED); + if (ret) + ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", + sta->addr, arvif->vdev_id, ret); + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH && + (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_MESH_POINT || + vif->type == NL80211_IFTYPE_ADHOC)) { /* * Disassociation. */ - ret = ath10k_station_disassoc(ar, arvif, sta); + ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM disassociated\n", + sta->addr); + + ret = ath10k_station_disassoc(ar, vif, sta); if (ret) - ath10k_warn("Failed to disassociate station: %pM\n", - sta->addr); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Station %pM moved to disassociated state\n", - sta->addr); + ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", + sta->addr, arvif->vdev_id, ret); } - +exit: mutex_unlock(&ar->conf_mutex); return ret; } static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, - u16 ac, bool enable) + u16 ac, bool enable) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct wmi_sta_uapsd_auto_trig_arg arg = {}; + u32 prio = 0, acc = 0; u32 value = 0; int ret = 0; + lockdep_assert_held(&ar->conf_mutex); + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) return 0; @@ -2370,18 +7768,26 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, case IEEE80211_AC_VO: value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; + prio = 7; + acc = 3; break; case IEEE80211_AC_VI: value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; + prio = 5; + acc = 2; break; case IEEE80211_AC_BE: value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; + prio = 2; + acc = 1; break; case IEEE80211_AC_BK: value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; + prio = 0; + acc = 0; break; } @@ -2394,7 +7800,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, WMI_STA_PS_PARAM_UAPSD, arvif->u.sta.uapsd); if (ret) { - ath10k_warn("could not set uapsd params %d\n", ret); + ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); goto exit; } @@ -2407,17 +7813,56 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, WMI_STA_PS_PARAM_RX_WAKE_POLICY, value); if (ret) - ath10k_warn("could not set rx wake param %d\n", ret); + ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); + + ret = 
ath10k_mac_vif_recalc_ps_wake_threshold(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || + test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { + /* Only userspace can make an educated decision when to send + * trigger frame. The following effectively disables u-UAPSD + * autotrigger in firmware (which is enabled by default + * provided the autotrigger service is available). + */ + + arg.wmm_ac = acc; + arg.user_priority = prio; + arg.service_interval = 0; + arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; + arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; + + ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, + arvif->bssid, &arg, 1); + if (ret) { + ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", + ret); + return ret; + } + } exit: return ret; } static int ath10k_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 ac, + struct ieee80211_vif *vif, + unsigned int link_id, u16 ac, const struct ieee80211_tx_queue_params *params) { struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_wmm_params_arg *p = NULL; int ret; @@ -2425,16 +7870,16 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw, switch (ac) { case IEEE80211_AC_VO: - p = &ar->wmm_params.ac_vo; + p = &arvif->wmm_params.ac_vo; break; case IEEE80211_AC_VI: - p = &ar->wmm_params.ac_vi; + p = &arvif->wmm_params.ac_vi; break; case IEEE80211_AC_BE: - p = &ar->wmm_params.ac_be; + p = &arvif->wmm_params.ac_be; break; case IEEE80211_AC_BK: - p = &ar->wmm_params.ac_bk; + p = &arvif->wmm_params.ac_bk; break; } @@ -2454,24 +7899,34 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw, */ p->txop = params->txop * 32; - /* FIXME: FW accepts wmm params per hw, not per vif */ - ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params); - if (ret) { - ath10k_warn("could not set wmm params %d\n", ret); - goto exit; + if (ar->wmi.ops->gen_vdev_wmm_conf) { + ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, + &arvif->wmm_params); + if (ret) { + ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", + arvif->vdev_id, ret); + goto exit; + } + } else { + /* This won't work well with multi-interface cases but it's + * better than nothing. 
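+ * + * (Presumably, then, the last interface to call conf_tx() overwrites the WMM parameters of every interface sharing the device whenever gen_vdev_wmm_conf is not implemented by the firmware interface.)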
+ */ + ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); + if (ret) { + ath10k_warn(ar, "failed to set wmm params: %d\n", ret); + goto exit; + } } ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); if (ret) - ath10k_warn("could not set sta uapsd %d\n", ret); + ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); exit: mutex_unlock(&ar->conf_mutex); return ret; } -#define ATH10K_ROC_TIMEOUT_HZ (2*HZ) - static int ath10k_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *chan, @@ -2479,72 +7934,111 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, enum ieee80211_roc_type type) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - struct wmi_start_scan_arg arg; - int ret; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct wmi_start_scan_arg *arg = NULL; + int ret = 0; + u32 scan_time_msec; mutex_lock(&ar->conf_mutex); - spin_lock_bh(&ar->data_lock); - if (ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); + if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { ret = -EBUSY; goto exit; } - INIT_COMPLETION(ar->scan.started); - INIT_COMPLETION(ar->scan.completed); - INIT_COMPLETION(ar->scan.on_channel); - ar->scan.in_progress = true; - ar->scan.aborting = false; - ar->scan.is_roc = true; - ar->scan.vdev_id = arvif->vdev_id; - ar->scan.roc_freq = chan->center_freq; + spin_lock_bh(&ar->data_lock); + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + reinit_completion(&ar->scan.started); + reinit_completion(&ar->scan.completed); + reinit_completion(&ar->scan.on_channel); + ar->scan.state = ATH10K_SCAN_STARTING; + ar->scan.is_roc = true; + ar->scan.vdev_id = arvif->vdev_id; + ar->scan.roc_freq = chan->center_freq; + ar->scan.roc_notify = true; + ret = 0; + break; + case ATH10K_SCAN_STARTING: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ret = -EBUSY; + break; + } spin_unlock_bh(&ar->data_lock); - memset(&arg, 0, sizeof(arg)); - ath10k_wmi_start_scan_init(ar, &arg); - arg.vdev_id = arvif->vdev_id; - arg.scan_id = ATH10K_SCAN_ID; - arg.n_channels = 1; - arg.channels[0] = chan->center_freq; - arg.dwell_time_active = duration; - arg.dwell_time_passive = duration; - arg.max_scan_time = 2 * duration; - arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; - arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; - - ret = ath10k_start_scan(ar, &arg); - if (ret) { - ath10k_warn("could not start roc scan (%d)\n", ret); + if (ret) + goto exit; + + scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; + + arg = kzalloc(sizeof(*arg), GFP_KERNEL); + if (!arg) { + ret = -ENOMEM; + goto exit; + } + + ath10k_wmi_start_scan_init(ar, arg); + arg->vdev_id = arvif->vdev_id; + arg->scan_id = ATH10K_SCAN_ID; + arg->n_channels = 1; + arg->channels[0] = chan->center_freq; + arg->dwell_time_active = scan_time_msec; + arg->dwell_time_passive = scan_time_msec; + arg->max_scan_time = scan_time_msec; + arg->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; + arg->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; + arg->burst_duration_ms = duration; + + ret = ath10k_start_scan(ar, arg); + if (ret) { + ath10k_warn(ar, "failed to start roc scan: %d\n", ret); spin_lock_bh(&ar->data_lock); - ar->scan.in_progress = false; + ar->scan.state = ATH10K_SCAN_IDLE; spin_unlock_bh(&ar->data_lock); goto exit; } - ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ); + ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); if (ret == 0) { - ath10k_warn("could not switch to channel for roc 
scan\n"); - ath10k_abort_scan(ar); + ath10k_warn(ar, "failed to switch to channel for roc scan\n"); + + ret = ath10k_scan_stop(ar); + if (ret) + ath10k_warn(ar, "failed to stop scan: %d\n", ret); + ret = -ETIMEDOUT; goto exit; } + ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, + msecs_to_jiffies(duration)); + ret = 0; exit: + kfree(arg); + mutex_unlock(&ar->conf_mutex); return ret; } -static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) +static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - ath10k_abort_scan(ar); + + spin_lock_bh(&ar->data_lock); + ar->scan.roc_notify = false; + spin_unlock_bh(&ar->data_lock); + + ath10k_scan_abort(ar); + mutex_unlock(&ar->conf_mutex); + cancel_delayed_work_sync(&ar->scan.timeout); + return 0; } @@ -2552,144 +8046,1493 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) * Both RTS and Fragmentation threshold are interface-specific * in ath10k, but device-specific in mac80211. */ -static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif) + +static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, + u32 value) { - struct ath10k_generic_iter *ar_iter = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - u32 rts = ar_iter->ar->hw->wiphy->rts_threshold; + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif; + int ret = 0; - rts = min_t(u32, rts, ATH10K_RTS_MAX); + mutex_lock(&ar->conf_mutex); + list_for_each_entry(arvif, &ar->arvifs, list) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", + arvif->vdev_id, value); - ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id, - WMI_VDEV_PARAM_RTS_THRESHOLD, - rts); - if (ar_iter->ret) - ath10k_warn("Failed to set RTS threshold for VDEV: %d\n", - arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set RTS threshold: %d for VDEV: %d\n", - rts, arvif->vdev_id); + ret = ath10k_mac_set_rts(arvif, value); + if (ret) { + ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", + arvif->vdev_id, ret); + break; + } + } + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, + int radio_idx, u32 value) +{ + /* Even though there's a WMI enum for fragmentation threshold no known + * firmware actually implements it. Moreover it is not possible to rely + * frame fragmentation to mac80211 because firmware clears the "more + * fragments" bit in frame control making it impossible for remote + * devices to reassemble frames. + * + * Hence implement a dummy callback just to say fragmentation isn't + * supported. This effectively prevents mac80211 from doing frame + * fragmentation in software. 
+ */ + return -EOPNOTSUPP; +} + +void ath10k_mac_wait_tx_complete(struct ath10k *ar) +{ + bool skip; + long time_left; + + /* mac80211 doesn't care if we really xmit queued frames or not + * we'll collect those frames either way if we stop/delete vdevs + */ + + if (ar->state == ATH10K_STATE_WEDGED) + return; + + time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({ + bool empty; + + spin_lock_bh(&ar->htt.tx_lock); + empty = (ar->htt.num_pending_tx == 0); + spin_unlock_bh(&ar->htt.tx_lock); + + skip = (ar->state == ATH10K_STATE_WEDGED) || + test_bit(ATH10K_FLAG_CRASH_FLUSH, + &ar->dev_flags); + + (empty || skip); + }), ATH10K_FLUSH_TIMEOUT_HZ); + + if (time_left == 0 || skip) + ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n", + skip, ar->state, time_left); } -static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { - struct ath10k_generic_iter ar_iter; struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif; + u32 bitmap; + + if (drop) { + if (vif && vif->type == NL80211_IFTYPE_STATION) { + bitmap = ~(1 << WMI_MGMT_TID); + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type == WMI_VDEV_TYPE_STA) + ath10k_wmi_peer_flush(ar, arvif->vdev_id, + arvif->bssid, bitmap); + } + ath10k_htt_flush_tx(&ar->htt); + } + return; + } + + mutex_lock(&ar->conf_mutex); + ath10k_mac_wait_tx_complete(ar); + mutex_unlock(&ar->conf_mutex); +} + +/* TODO: Implement this function properly + * For now it is needed to reply to Probe Requests in IBSS mode. + * Probably we need this information from FW. + */ +static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) +{ + return 1; +} + +static void ath10k_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif; + + if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) + return; + + mutex_lock(&ar->conf_mutex); + + /* If device failed to restart it will be in a different state, e.g. + * ATH10K_STATE_WEDGED + */ + if (ar->state == ATH10K_STATE_RESTARTED) { + ath10k_info(ar, "device successfully recovered\n"); + ar->state = ATH10K_STATE_ON; + ieee80211_wake_queues(ar->hw); + + /* Clear recovery state. 
*/ + complete(&ar->driver_recovery); + atomic_set(&ar->fail_cont_count, 0); + atomic_set(&ar->pending_recovery, 0); + + if (ar->hw_params.hw_restart_disconnect) { + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA) + ieee80211_hw_restart_disconnect(arvif->vif); + } + } + } + + mutex_unlock(&ar->conf_mutex); +} + +static void +ath10k_mac_update_bss_chan_survey(struct ath10k *ar, + struct ieee80211_channel *channel) +{ + int ret; + enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; + + lockdep_assert_held(&ar->conf_mutex); + + if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || + (ar->rx_channel != channel)) + return; + + if (ar->scan.state != ATH10K_SCAN_IDLE) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); + return; + } + + reinit_completion(&ar->bss_survey_done); + + ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); + if (ret) { + ath10k_warn(ar, "failed to send pdev bss chan info request\n"); + return; + } + + ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); + if (!ret) { + ath10k_warn(ar, "bss channel survey timed out\n"); + return; + } +} + +static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct ath10k *ar = hw->priv; + struct ieee80211_supported_band *sband; + struct survey_info *ar_survey = &ar->survey[idx]; + int ret = 0; + + mutex_lock(&ar->conf_mutex); + + sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = NULL; + } + + if (!sband) + sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; + + if (!sband || idx >= sband->n_channels) { + ret = -ENOENT; + goto exit; + } + + ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); + + spin_lock_bh(&ar->data_lock); + memcpy(survey, ar_survey, sizeof(*survey)); + spin_unlock_bh(&ar->data_lock); + + survey->channel = &sband->channels[idx]; + + if (ar->rx_channel == survey->channel) + survey->filled |= SURVEY_INFO_IN_USE; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static bool +ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask, + int *nss) +{ + struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; + u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); + u8 ht_nss_mask = 0; + u8 vht_nss_mask = 0; + int i; + + if (mask->control[band].legacy) + return false; + + for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { + if (mask->control[band].ht_mcs[i] == 0) + continue; + else if (mask->control[band].ht_mcs[i] == + sband->ht_cap.mcs.rx_mask[i]) + ht_nss_mask |= BIT(i); + else + return false; + } + + for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { + if (mask->control[band].vht_mcs[i] == 0) + continue; + else if (mask->control[band].vht_mcs[i] == + ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) + vht_nss_mask |= BIT(i); + else + return false; + } + + if (ht_nss_mask != vht_nss_mask) + return false; + + if (ht_nss_mask == 0) + return false; + + if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) + return false; + + *nss = fls(ht_nss_mask); + + return true; +} + +static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, + u8 rate, u8 nss, u8 sgi, u8 ldpc) +{ + struct ath10k *ar = arvif->ar; + u32 vdev_param; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 
0x%02x nss %u sgi %u\n", + arvif->vdev_id, rate, nss, sgi); + + vdev_param = ar->wmi.vdev_param->fixed_rate; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); + if (ret) { + ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", + rate, ret); + return ret; + } + + vdev_param = ar->wmi.vdev_param->nss; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); + if (ret) { + ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); + return ret; + } + + vdev_param = ar->wmi.vdev_param->sgi; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); + if (ret) { + ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); + return ret; + } + + vdev_param = ar->wmi.vdev_param->ldpc; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc); + if (ret) { + ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret); + return ret; + } + + return 0; +} + +static bool +ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask, + bool allow_pfr) +{ + int i; + u16 vht_mcs; + + /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible + * to express all VHT MCS rate masks. Effectively only the following + * ranges can be used: none, 0-7, 0-8 and 0-9. + */ + for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { + vht_mcs = mask->control[band].vht_mcs[i]; + + switch (vht_mcs) { + case 0: + case BIT(8) - 1: + case BIT(9) - 1: + case BIT(10) - 1: + break; + default: + if (!allow_pfr) + ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); + return false; + } + } + + return true; +} + +static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar, + struct ath10k_vif *arvif, + struct ieee80211_sta *sta) +{ + int err; + u8 rate = arvif->vht_pfr; + + /* skip non vht and multiple rate peers */ + if (!sta->deflink.vht_cap.vht_supported || arvif->vht_num_rates != 1) + return false; + + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + WMI_PEER_PARAM_FIXED_RATE, rate); + if (err) + ath10k_warn(ar, "failed to enable STA %pM peer fixed rate: %d\n", + sta->addr, err); + + return true; +} + +static void ath10k_mac_set_bitrate_mask_iter(void *data, + struct ieee80211_sta *sta) +{ + struct ath10k_vif *arvif = data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arvif->ar; + + if (arsta->arvif != arvif) + return; + + if (ath10k_mac_set_vht_bitrate_mask_fixup(ar, arvif, sta)) + return; + + spin_lock_bh(&ar->data_lock); + arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; + spin_unlock_bh(&ar->data_lock); + + ieee80211_queue_work(ar->hw, &arsta->update_wk); +} + +static void ath10k_mac_clr_bitrate_mask_iter(void *data, + struct ieee80211_sta *sta) +{ + struct ath10k_vif *arvif = data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arvif->ar; + int err; + + /* clear vht peers only */ + if (arsta->arvif != arvif || !sta->deflink.vht_cap.vht_supported) + return; + + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + WMI_PEER_PARAM_FIXED_RATE, + WMI_FIXED_RATE_NONE); + if (err) + ath10k_warn(ar, "failed to clear STA %pM peer fixed rate: %d\n", + sta->addr, err); +} + +static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct cfg80211_bitrate_mask *mask) +{ + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + struct ath10k *ar = arvif->ar; + enum nl80211_band 
band; + const u8 *ht_mcs_mask; + const u16 *vht_mcs_mask; + u8 rate; + u8 nss; + u8 sgi; + u8 ldpc; + int single_nss; + int ret; + int vht_num_rates, allow_pfr; + u8 vht_pfr; + bool update_bitrate_mask = true; + + if (ath10k_mac_vif_chan(vif, &def)) + return -EPERM; + + band = def.chan->band; + ht_mcs_mask = mask->control[band].ht_mcs; + vht_mcs_mask = mask->control[band].vht_mcs; + ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); + + sgi = mask->control[band].gi; + if (sgi == NL80211_TXRATE_FORCE_LGI) + return -EINVAL; + + allow_pfr = test_bit(ATH10K_FW_FEATURE_PEER_FIXED_RATE, + ar->normal_mode_fw.fw_file.fw_features); + if (allow_pfr) { + mutex_lock(&ar->conf_mutex); + ieee80211_iterate_stations_atomic(ar->hw, + ath10k_mac_clr_bitrate_mask_iter, + arvif); + mutex_unlock(&ar->conf_mutex); + } + + if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask, + &vht_num_rates)) { + ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, + &rate, &nss, + false); + if (ret) { + ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, + &single_nss)) { + rate = WMI_FIXED_RATE_NONE; + nss = single_nss; + } else { + rate = WMI_FIXED_RATE_NONE; + nss = min(ar->num_rf_chains, + max(ath10k_mac_max_ht_nss(ht_mcs_mask), + ath10k_mac_max_vht_nss(vht_mcs_mask))); + + if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask, + allow_pfr)) { + u8 vht_nss; + + if (!allow_pfr || vht_num_rates != 1) + return -EINVAL; - memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); - ar_iter.ar = ar; + /* If we reach here, firmware supports peer fixed rate and + * has a single VHT rate; don't update the vif bitrate_mask, + * as the rate applies only to a specific peer. + */ + ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, + &vht_pfr, + &vht_nss, + true); + update_bitrate_mask = false; + } else { + vht_pfr = 0; + } + + mutex_lock(&ar->conf_mutex); + + if (update_bitrate_mask) + arvif->bitrate_mask = *mask; + arvif->vht_num_rates = vht_num_rates; + arvif->vht_pfr = vht_pfr; + ieee80211_iterate_stations_atomic(ar->hw, + ath10k_mac_set_bitrate_mask_iter, + arvif); + + mutex_unlock(&ar->conf_mutex); + } mutex_lock(&ar->conf_mutex); - ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, - ath10k_set_rts_iter, &ar_iter); + + ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); + if (ret) { + ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", + arvif->vdev_id, ret); + goto exit; + } + +exit: mutex_unlock(&ar->conf_mutex); - return ar_iter.ret; + return ret; } -static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +static void ath10k_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + u32 changed) { - struct ath10k_generic_iter *ar_iter = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - u32 frag = ar_iter->ar->hw->wiphy->frag_threshold; + struct ieee80211_sta *sta = link_sta->sta; + struct ath10k *ar = hw->priv; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_peer *peer; + u32 bw, smps; + + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); + if (!peer) { + spin_unlock_bh(&ar->data_lock); + ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n", + sta->addr, arvif->vdev_id); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_STA, + "mac sta 
rc update for %pM changed %08x bw %d nss %d smps %d\n", + sta->addr, changed, sta->deflink.bandwidth, + sta->deflink.rx_nss, + sta->deflink.smps_mode); + + if (changed & IEEE80211_RC_BW_CHANGED) { + bw = WMI_PEER_CHWIDTH_20MHZ; + + switch (sta->deflink.bandwidth) { + case IEEE80211_STA_RX_BW_20: + bw = WMI_PEER_CHWIDTH_20MHZ; + break; + case IEEE80211_STA_RX_BW_40: + bw = WMI_PEER_CHWIDTH_40MHZ; + break; + case IEEE80211_STA_RX_BW_80: + bw = WMI_PEER_CHWIDTH_80MHZ; + break; + case IEEE80211_STA_RX_BW_160: + bw = WMI_PEER_CHWIDTH_160MHZ; + break; + default: + ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", + sta->deflink.bandwidth, sta->addr); + bw = WMI_PEER_CHWIDTH_20MHZ; + break; + } + + arsta->bw = bw; + } + + if (changed & IEEE80211_RC_NSS_CHANGED) + arsta->nss = sta->deflink.rx_nss; + + if (changed & IEEE80211_RC_SMPS_CHANGED) { + smps = WMI_PEER_SMPS_PS_NONE; + + switch (sta->deflink.smps_mode) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_OFF: + smps = WMI_PEER_SMPS_PS_NONE; + break; + case IEEE80211_SMPS_STATIC: + smps = WMI_PEER_SMPS_STATIC; + break; + case IEEE80211_SMPS_DYNAMIC: + smps = WMI_PEER_SMPS_DYNAMIC; + break; + case IEEE80211_SMPS_NUM_MODES: + ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", + sta->deflink.smps_mode, sta->addr); + smps = WMI_PEER_SMPS_PS_NONE; + break; + } + + arsta->smps = smps; + } + + arsta->changed |= changed; + + spin_unlock_bh(&ar->data_lock); + + ieee80211_queue_work(hw, &arsta->update_wk); +} + +static void ath10k_offset_tsf(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, s64 tsf_offset) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + u32 offset, vdev_param; int ret; - frag = clamp_t(u32, frag, - ATH10K_FRAGMT_THRESHOLD_MIN, - ATH10K_FRAGMT_THRESHOLD_MAX); + if (tsf_offset < 0) { + vdev_param = ar->wmi.vdev_param->dec_tsf; + offset = -tsf_offset; + } else { + vdev_param = ar->wmi.vdev_param->inc_tsf; + offset = tsf_offset; + } - ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id, - WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, - frag); + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + vdev_param, offset); - ar_iter->ret = ret; - if (ar_iter->ret) - ath10k_warn("Failed to set frag threshold for VDEV: %d\n", - arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set frag threshold: %d for VDEV: %d\n", - frag, arvif->vdev_id); + if (ret && ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n", + offset, vdev_param, ret); } -static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) +static int ath10k_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) { - struct ath10k_generic_iter ar_iter; struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %u action %d\n", + arvif->vdev_id, sta->addr, tid, action); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + case IEEE80211_AMPDU_RX_STOP: + /* HTT AddBa/DelBa events trigger mac80211 Rx BA session + * creation/removal. Do we need to verify this? 
+ */ + return 0; + case IEEE80211_AMPDU_TX_START: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + case IEEE80211_AMPDU_TX_OPERATIONAL: + /* Firmware offloads Tx aggregation entirely so deny mac80211 + * Tx aggregation requests. + */ + return -EOPNOTSUPP; + } + + return -EINVAL; +} + +static void +ath10k_mac_update_rx_channel(struct ath10k *ar, + struct ieee80211_chanctx_conf *ctx, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs) +{ + struct cfg80211_chan_def *def = NULL; + + /* Both locks are required because ar->rx_channel is modified. This + * allows readers to hold either lock. + */ + lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_held(&ar->data_lock); + + WARN_ON(ctx && vifs); + WARN_ON(vifs && !n_vifs); + + /* FIXME: Sort of an optimization and a workaround. Peers and vifs are + * on a linked list now. Doing a lookup peer -> vif -> chanctx for each + * ppdu on Rx may reduce performance on low-end systems. It should be + * possible to make tables/hashmaps to speed the lookup up (be wary of + * cpu data cache lines though regarding sizes) but to keep the initial + * implementation simple and less intrusive, fall back to the slow lookup + * only for multi-channel cases. Single-channel cases will continue to + * use the old channel derivation and thus performance should not be + * affected much. + */ + rcu_read_lock(); + if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) { + ieee80211_iter_chan_contexts_atomic(ar->hw, + ath10k_mac_get_any_chandef_iter, + &def); + + if (vifs) + def = &vifs[0].new_ctx->def; + + ar->rx_channel = def->chan; + } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) || + (ctx && (ar->state == ATH10K_STATE_RESTARTED))) { + /* During driver restart due to firmware assert, since mac80211 + * already has a valid channel context for the given radio, channel + * context iteration returns num_chanctx > 0. So fix rx_channel + * when restart is in progress. + */ + ar->rx_channel = ctx->def.chan; + } else { + ar->rx_channel = NULL; + } + rcu_read_unlock(); +} + +static void +ath10k_mac_update_vif_chan(struct ath10k *ar, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs) +{ + struct ath10k_vif *arvif; + int ret; + int i; + + lockdep_assert_held(&ar->conf_mutex); + + /* First stop monitor interface. Some FW versions crash if there's a + * lone monitor interface. + */ + if (ar->monitor_started) + ath10k_monitor_stop(ar); - memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); - ar_iter.ar = ar; + for (i = 0; i < n_vifs; i++) { + arvif = (void *)vifs[i].vif->drv_priv; + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n", + arvif->vdev_id, + vifs[i].old_ctx->def.chan->center_freq, + vifs[i].new_ctx->def.chan->center_freq, + vifs[i].old_ctx->def.width, + vifs[i].new_ctx->def.width); + + if (WARN_ON(!arvif->is_started)) + continue; + + if (WARN_ON(!arvif->is_up)) + continue; + + ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) { + ath10k_warn(ar, "failed to down vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } + } + + /* All relevant vdevs are downed and associated channel resources + * should be available for the channel switch now. 
+ */ + + spin_lock_bh(&ar->data_lock); + ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); + spin_unlock_bh(&ar->data_lock); + + for (i = 0; i < n_vifs; i++) { + arvif = (void *)vifs[i].vif->drv_priv; + + if (WARN_ON(!arvif->is_started)) + continue; + + if (WARN_ON(!arvif->is_up)) + continue; + + ret = ath10k_mac_setup_bcn_tmpl(arvif); + if (ret) + ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", + ret); + + ret = ath10k_mac_setup_prb_tmpl(arvif); + if (ret) + ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", + ret); + + ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); + if (ret) { + ath10k_warn(ar, "failed to restart vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } + + ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, + arvif->bssid); + if (ret) { + ath10k_warn(ar, "failed to bring vdev up %d: %d\n", + arvif->vdev_id, ret); + continue; + } + } + + ath10k_monitor_recalc(ar); +} + +static int +ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath10k *ar = hw->priv; + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac chanctx add freq %u width %d ptr %p\n", + ctx->def.chan->center_freq, ctx->def.width, ctx); mutex_lock(&ar->conf_mutex); - ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, - ath10k_set_frag_iter, &ar_iter); + + spin_lock_bh(&ar->data_lock); + ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); + spin_unlock_bh(&ar->data_lock); + + ath10k_recalc_radar_detection(ar); + ath10k_monitor_recalc(ar); + mutex_unlock(&ar->conf_mutex); - return ar_iter.ret; + return 0; } -static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void +ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) { struct ath10k *ar = hw->priv; + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac chanctx remove freq %u width %d ptr %p\n", + ctx->def.chan->center_freq, ctx->def.width, ctx); + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); + spin_unlock_bh(&ar->data_lock); + + ath10k_recalc_radar_detection(ar); + ath10k_monitor_recalc(ar); + + mutex_unlock(&ar->conf_mutex); +} + +struct ath10k_mac_change_chanctx_arg { + struct ieee80211_chanctx_conf *ctx; + struct ieee80211_vif_chanctx_switch *vifs; + int n_vifs; + int next_vif; +}; + +static void +ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath10k_mac_change_chanctx_arg *arg = data; + + if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx) + return; + + arg->n_vifs++; +} + +static void +ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath10k_mac_change_chanctx_arg *arg = data; + struct ieee80211_chanctx_conf *ctx; + + ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf); + if (ctx != arg->ctx) + return; + + if (WARN_ON(arg->next_vif == arg->n_vifs)) + return; + + arg->vifs[arg->next_vif].vif = vif; + arg->vifs[arg->next_vif].old_ctx = ctx; + arg->vifs[arg->next_vif].new_ctx = ctx; + arg->next_vif++; +} + +static void +ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) +{ + struct ath10k *ar = hw->priv; + struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx }; + + mutex_lock(&ar->conf_mutex); + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac chanctx change freq %u width %d ptr %p changed %x\n", + ctx->def.chan->center_freq, ctx->def.width, ctx, changed); + + /* This 
shouldn't really happen because channel switching should use + * switch_vif_chanctx(). + */ + if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) + goto unlock; + + if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { + ieee80211_iterate_active_interfaces_atomic( + hw, + ATH10K_ITER_NORMAL_FLAGS, + ath10k_mac_change_chanctx_cnt_iter, + &arg); + if (arg.n_vifs == 0) + goto radar; + + arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), + GFP_KERNEL); + if (!arg.vifs) + goto radar; + + ieee80211_iterate_active_interfaces_atomic( + hw, + ATH10K_ITER_NORMAL_FLAGS, + ath10k_mac_change_chanctx_fill_iter, + &arg); + ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); + kfree(arg.vifs); + } + +radar: + ath10k_recalc_radar_detection(ar); + + /* FIXME: How to configure Rx chains properly? */ + + /* No other actions are actually necessary. Firmware maintains channel + * definitions per vdev internally and there's no host-side channel + * context abstraction to configure, e.g. channel width. + */ + +unlock: + mutex_unlock(&ar->conf_mutex); +} + +static int +ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; int ret; - /* mac80211 doesn't care if we really xmit queued frames or not - * we'll collect those frames either way if we stop/delete vdevs */ - if (drop) + mutex_lock(&ar->conf_mutex); + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac chanctx assign ptr %p vdev_id %i\n", + ctx, arvif->vdev_id); + + if (WARN_ON(arvif->is_started)) { + mutex_unlock(&ar->conf_mutex); + return -EBUSY; + } + + ret = ath10k_vdev_start(arvif, &ctx->def); + if (ret) { + ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", + arvif->vdev_id, vif->addr, + ctx->def.chan->center_freq, ret); + goto err; + } + + arvif->is_started = true; + + ret = ath10k_mac_vif_setup_ps(arvif); + if (ret) { + ath10k_warn(ar, "failed to update vdev %i ps: %d\n", + arvif->vdev_id, ret); + goto err_stop; + } + + if (vif->type == NL80211_IFTYPE_MONITOR) { + ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); + if (ret) { + ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", + arvif->vdev_id, ret); + goto err_stop; + } + + arvif->is_up = true; + } + + if (ath10k_mac_can_set_cts_prot(arvif)) { + ret = ath10k_mac_set_cts_prot(arvif); + if (ret) + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", + arvif->vdev_id, ret); + } + + if (ath10k_peer_stats_enabled(ar) && + ar->hw_params.tx_stats_over_pktlog) { + ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS; + ret = ath10k_wmi_pdev_pktlog_enable(ar, + ar->pktlog_filter); + if (ret) { + ath10k_warn(ar, "failed to enable pktlog %d\n", ret); + goto err_stop; + } + } + + mutex_unlock(&ar->conf_mutex); + return 0; + +err_stop: + ath10k_vdev_stop(arvif); + arvif->is_started = false; + ath10k_mac_vif_setup_ps(arvif); + +err: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static void +ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + int ret; + + mutex_lock(&ar->conf_mutex); + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac chanctx unassign ptr %p vdev_id %i\n", + ctx, arvif->vdev_id); + + WARN_ON(!arvif->is_started); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + 
WARN_ON(!arvif->is_up); + + ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) + ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", + arvif->vdev_id, ret); + + arvif->is_up = false; + } + + ret = ath10k_vdev_stop(arvif); + if (ret) + ath10k_warn(ar, "failed to stop vdev %i: %d\n", + arvif->vdev_id, ret); + + arvif->is_started = false; + + mutex_unlock(&ar->conf_mutex); +} + +static int +ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode) +{ + struct ath10k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac chanctx switch n_vifs %d mode %d\n", + n_vifs, mode); + ath10k_mac_update_vif_chan(ar, vifs, n_vifs); + + mutex_unlock(&ar->conf_mutex); + return 0; +} + +static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ath10k *ar; + struct ath10k_peer *peer; + + ar = hw->priv; + + list_for_each_entry(peer, &ar->peers, list) + if (peer->sta == sta) + peer->removed = true; +} + +/* HT MCS parameters with Nss = 1 */ +static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss1[] = { + /* MCS L20 L40 S20 S40 */ + {0, { 65, 135, 72, 150} }, + {1, { 130, 270, 144, 300} }, + {2, { 195, 405, 217, 450} }, + {3, { 260, 540, 289, 600} }, + {4, { 390, 810, 433, 900} }, + {5, { 520, 1080, 578, 1200} }, + {6, { 585, 1215, 650, 1350} }, + {7, { 650, 1350, 722, 1500} } +}; + +/* HT MCS parameters with Nss = 2 */ +static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss2[] = { + /* MCS L20 L40 S20 S40 */ + {0, {130, 270, 144, 300} }, + {1, {260, 540, 289, 600} }, + {2, {390, 810, 433, 900} }, + {3, {520, 1080, 578, 1200} }, + {4, {780, 1620, 867, 1800} }, + {5, {1040, 2160, 1156, 2400} }, + {6, {1170, 2430, 1300, 2700} }, + {7, {1300, 2700, 1444, 3000} } +}; + +/* MCS parameters with Nss = 1 */ +static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[] = { + /* MCS L80 S80 L40 S40 L20 S20 */ + {0, {293, 325}, {135, 150}, {65, 72} }, + {1, {585, 650}, {270, 300}, {130, 144} }, + {2, {878, 975}, {405, 450}, {195, 217} }, + {3, {1170, 1300}, {540, 600}, {260, 289} }, + {4, {1755, 1950}, {810, 900}, {390, 433} }, + {5, {2340, 2600}, {1080, 1200}, {520, 578} }, + {6, {2633, 2925}, {1215, 1350}, {585, 650} }, + {7, {2925, 3250}, {1350, 1500}, {650, 722} }, + {8, {3510, 3900}, {1620, 1800}, {780, 867} }, + {9, {3900, 4333}, {1800, 2000}, {865, 960} } +}; + +/*MCS parameters with Nss = 2 */ +static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[] = { + /* MCS L80 S80 L40 S40 L20 S20 */ + {0, {585, 650}, {270, 300}, {130, 144} }, + {1, {1170, 1300}, {540, 600}, {260, 289} }, + {2, {1755, 1950}, {810, 900}, {390, 433} }, + {3, {2340, 2600}, {1080, 1200}, {520, 578} }, + {4, {3510, 3900}, {1620, 1800}, {780, 867} }, + {5, {4680, 5200}, {2160, 2400}, {1040, 1156} }, + {6, {5265, 5850}, {2430, 2700}, {1170, 1300} }, + {7, {5850, 6500}, {2700, 3000}, {1300, 1444} }, + {8, {7020, 7800}, {3240, 3600}, {1560, 1733} }, + {9, {7800, 8667}, {3600, 4000}, {1730, 1920} } +}; + +static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs, + u8 *flags, u8 *bw) +{ + struct ath10k_index_ht_data_rate_type *mcs_rate; + u8 index; + size_t len_nss1 = ARRAY_SIZE(supported_ht_mcs_rate_nss1); + size_t len_nss2 = ARRAY_SIZE(supported_ht_mcs_rate_nss2); + + if (mcs >= (len_nss1 + len_nss2)) { + 
ath10k_warn(ar, "not supported mcs %d in current rate table", mcs); return; + } - ret = wait_event_timeout(ar->htt->empty_tx_wq, ({ - bool empty; - spin_lock_bh(&ar->htt->tx_lock); - empty = bitmap_empty(ar->htt->used_msdu_ids, - ar->htt->max_num_pending_tx); - spin_unlock_bh(&ar->htt->tx_lock); - (empty); - }), ATH10K_FLUSH_TIMEOUT_HZ); - if (ret <= 0) - ath10k_warn("tx not flushed\n"); + mcs_rate = (struct ath10k_index_ht_data_rate_type *) + ((nss == 1) ? &supported_ht_mcs_rate_nss1 : + &supported_ht_mcs_rate_nss2); + + if (mcs >= len_nss1) + index = mcs - len_nss1; + else + index = mcs; + + if (rate == mcs_rate[index].supported_rate[0]) { + *bw = RATE_INFO_BW_20; + } else if (rate == mcs_rate[index].supported_rate[1]) { + *bw |= RATE_INFO_BW_40; + } else if (rate == mcs_rate[index].supported_rate[2]) { + *bw |= RATE_INFO_BW_20; + *flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (rate == mcs_rate[index].supported_rate[3]) { + *bw |= RATE_INFO_BW_40; + *flags |= RATE_INFO_FLAGS_SHORT_GI; + } else { + ath10k_warn(ar, "invalid ht params rate %d 100kbps nss %d mcs %d", + rate, nss, mcs); + } } -/* TODO: Implement this function properly - * For now it is needed to reply to Probe Requests in IBSS mode. - * Propably we need this information from FW. - */ -static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) +static void ath10k_mac_get_rate_flags_vht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs, + u8 *flags, u8 *bw) { - return 1; + struct ath10k_index_vht_data_rate_type *mcs_rate; + + mcs_rate = (struct ath10k_index_vht_data_rate_type *) + ((nss == 1) ? &supported_vht_mcs_rate_nss1 : + &supported_vht_mcs_rate_nss2); + + if (rate == mcs_rate[mcs].supported_VHT80_rate[0]) { + *bw = RATE_INFO_BW_80; + } else if (rate == mcs_rate[mcs].supported_VHT80_rate[1]) { + *bw = RATE_INFO_BW_80; + *flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (rate == mcs_rate[mcs].supported_VHT40_rate[0]) { + *bw = RATE_INFO_BW_40; + } else if (rate == mcs_rate[mcs].supported_VHT40_rate[1]) { + *bw = RATE_INFO_BW_40; + *flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (rate == mcs_rate[mcs].supported_VHT20_rate[0]) { + *bw = RATE_INFO_BW_20; + } else if (rate == mcs_rate[mcs].supported_VHT20_rate[1]) { + *bw = RATE_INFO_BW_20; + *flags |= RATE_INFO_FLAGS_SHORT_GI; + } else { + ath10k_warn(ar, "invalid vht params rate %d 100kbps nss %d mcs %d", + rate, nss, mcs); + } +} + +static void ath10k_mac_get_rate_flags(struct ath10k *ar, u32 rate, + enum ath10k_phy_mode mode, u8 nss, u8 mcs, + u8 *flags, u8 *bw) +{ + if (mode == ATH10K_PHY_MODE_HT) { + *flags = RATE_INFO_FLAGS_MCS; + ath10k_mac_get_rate_flags_ht(ar, rate, nss, mcs, flags, bw); + } else if (mode == ATH10K_PHY_MODE_VHT) { + *flags = RATE_INFO_FLAGS_VHT_MCS; + ath10k_mac_get_rate_flags_vht(ar, rate, nss, mcs, flags, bw); + } +} + +static void ath10k_mac_parse_bitrate(struct ath10k *ar, u32 rate_code, + u32 bitrate_kbps, struct rate_info *rate) +{ + enum ath10k_phy_mode mode = ATH10K_PHY_MODE_LEGACY; + enum wmi_rate_preamble preamble = WMI_TLV_GET_HW_RC_PREAM_V1(rate_code); + u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rate_code) + 1; + u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rate_code); + u8 flags = 0, bw = 0; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac parse rate code 0x%x bitrate %d kbps\n", + rate_code, bitrate_kbps); + + if (preamble == WMI_RATE_PREAMBLE_HT) + mode = ATH10K_PHY_MODE_HT; + else if (preamble == WMI_RATE_PREAMBLE_VHT) + mode = ATH10K_PHY_MODE_VHT; + + ath10k_mac_get_rate_flags(ar, bitrate_kbps / 100, mode, nss, mcs, &flags, &bw); + + ath10k_dbg(ar, ATH10K_DBG_MAC, + 
"mac parse bitrate preamble %d mode %d nss %d mcs %d flags %x bw %d\n", + preamble, mode, nss, mcs, flags, bw); + + rate->flags = flags; + rate->bw = bw; + rate->legacy = bitrate_kbps / 100; + rate->nss = nss; + rate->mcs = mcs; +} + +static void ath10k_mac_sta_get_peer_stats_info(struct ath10k *ar, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k_peer *peer; + unsigned long time_left; + int ret; + + if (!(ar->hw_params.supports_peer_stats_info && + arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA)) + return; + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find(ar, arsta->arvif->vdev_id, sta->addr); + spin_unlock_bh(&ar->data_lock); + if (!peer) + return; + + reinit_completion(&ar->peer_stats_info_complete); + + ret = ath10k_wmi_request_peer_stats_info(ar, + arsta->arvif->vdev_id, + WMI_REQUEST_ONE_PEER_STATS_INFO, + arsta->arvif->bssid, + 0); + if (ret && ret != -EOPNOTSUPP) { + ath10k_warn(ar, "could not request peer stats info: %d\n", ret); + return; + } + + time_left = wait_for_completion_timeout(&ar->peer_stats_info_complete, 3 * HZ); + if (time_left == 0) { + ath10k_warn(ar, "timed out waiting peer stats info\n"); + return; + } + + if (arsta->rx_rate_code != 0 && arsta->rx_bitrate_kbps != 0) { + ath10k_mac_parse_bitrate(ar, arsta->rx_rate_code, + arsta->rx_bitrate_kbps, + &sinfo->rxrate); + + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); + arsta->rx_rate_code = 0; + arsta->rx_bitrate_kbps = 0; + } + + if (arsta->tx_rate_code != 0 && arsta->tx_bitrate_kbps != 0) { + ath10k_mac_parse_bitrate(ar, arsta->tx_rate_code, + arsta->tx_bitrate_kbps, + &sinfo->txrate); + + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + arsta->tx_rate_code = 0; + arsta->tx_bitrate_kbps = 0; + } +} + +static void ath10k_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + + if (!ath10k_peer_stats_enabled(ar)) + return; + + mutex_lock(&ar->conf_mutex); + ath10k_debug_fw_stats_request(ar); + mutex_unlock(&ar->conf_mutex); + + sinfo->rx_duration = arsta->rx_duration; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); + + if (arsta->txrate.legacy || arsta->txrate.nss) { + if (arsta->txrate.legacy) { + sinfo->txrate.legacy = arsta->txrate.legacy; + } else { + sinfo->txrate.mcs = arsta->txrate.mcs; + sinfo->txrate.nss = arsta->txrate.nss; + sinfo->txrate.bw = arsta->txrate.bw; + } + sinfo->txrate.flags = arsta->txrate.flags; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + } + + if (ar->htt.disable_tx_comp) { + sinfo->tx_failed = arsta->tx_failed; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); + } + + sinfo->tx_retries = arsta->tx_retries; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); + + ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo); +} + +static int ath10k_mac_op_set_tid_config(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct cfg80211_tid_config *tid_config) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_mac_iter_tid_conf_data data = {}; + struct wmi_per_peer_per_tid_cfg_arg arg = {}; + int ret, i; + + mutex_lock(&ar->conf_mutex); + arg.vdev_id = arvif->vdev_id; + + arvif->tids_rst = 0; + memset(arvif->tid_conf_changed, 0, sizeof(arvif->tid_conf_changed)); + + for (i = 0; i < 
tid_config->n_tid_conf; i++) { + ret = ath10k_mac_parse_tid_config(ar, sta, vif, + &tid_config->tid_conf[i], + &arg); + if (ret) + goto exit; + } + + ret = 0; + + if (sta) + goto exit; + + arvif->tids_rst = 0; + data.curr_vif = vif; + data.ar = ar; + + ieee80211_iterate_stations_atomic(hw, ath10k_mac_vif_stations_tid_conf, + &data); + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath10k_mac_op_reset_tid_config(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u8 tids) +{ + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_mac_iter_tid_conf_data data = {}; + struct ath10k *ar = hw->priv; + int ret = 0; + + mutex_lock(&ar->conf_mutex); + + if (sta) { + arvif->tids_rst = 0; + ret = ath10k_mac_reset_tid_config(ar, sta, arvif, tids); + goto exit; + } + + arvif->tids_rst = tids; + data.curr_vif = vif; + data.ar = ar; + ieee80211_iterate_stations_atomic(hw, ath10k_mac_vif_stations_tid_conf, + &data); + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; } static const struct ieee80211_ops ath10k_ops = { - .tx = ath10k_tx, + .tx = ath10k_mac_op_tx, + .wake_tx_queue = ath10k_mac_op_wake_tx_queue, .start = ath10k_start, .stop = ath10k_stop, .config = ath10k_config, .add_interface = ath10k_add_interface, + .update_vif_offload = ath10k_update_vif_offload, .remove_interface = ath10k_remove_interface, .configure_filter = ath10k_configure_filter, .bss_info_changed = ath10k_bss_info_changed, + .set_coverage_class = ath10k_mac_op_set_coverage_class, .hw_scan = ath10k_hw_scan, .cancel_hw_scan = ath10k_cancel_hw_scan, .set_key = ath10k_set_key, + .set_default_unicast_key = ath10k_set_default_unicast_key, .sta_state = ath10k_sta_state, + .sta_set_txpwr = ath10k_sta_set_txpwr, .conf_tx = ath10k_conf_tx, .remain_on_channel = ath10k_remain_on_channel, .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, .set_rts_threshold = ath10k_set_rts_threshold, - .set_frag_threshold = ath10k_set_frag_threshold, + .set_frag_threshold = ath10k_mac_op_set_frag_threshold, .flush = ath10k_flush, .tx_last_beacon = ath10k_tx_last_beacon, + .set_antenna = ath10k_set_antenna, + .get_antenna = ath10k_get_antenna, + .reconfig_complete = ath10k_reconfig_complete, + .get_survey = ath10k_get_survey, + .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, + .link_sta_rc_update = ath10k_sta_rc_update, + .offset_tsf = ath10k_offset_tsf, + .ampdu_action = ath10k_ampdu_action, + .get_et_sset_count = ath10k_debug_get_et_sset_count, + .get_et_stats = ath10k_debug_get_et_stats, + .get_et_strings = ath10k_debug_get_et_strings, + .add_chanctx = ath10k_mac_op_add_chanctx, + .remove_chanctx = ath10k_mac_op_remove_chanctx, + .change_chanctx = ath10k_mac_op_change_chanctx, + .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, + .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, + .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, + .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, + .sta_statistics = ath10k_sta_statistics, + .set_tid_config = ath10k_mac_op_set_tid_config, + .reset_tid_config = ath10k_mac_op_reset_tid_config, + + CFG80211_TESTMODE_CMD(ath10k_tm_cmd) + +#ifdef CONFIG_PM + .suspend = ath10k_wow_op_suspend, + .resume = ath10k_wow_op_resume, + .set_wakeup = ath10k_wow_op_set_wakeup, +#endif +#ifdef CONFIG_MAC80211_DEBUGFS + .sta_add_debugfs = ath10k_sta_add_debugfs, +#endif + .set_sar_specs = ath10k_mac_set_sar_specs, }; -#define RATETAB_ENT(_rate, _rateid, _flags) { \ - .bitrate = (_rate), \ - .flags = (_flags), \ - .hw_value = 
(_rateid), \ -} - #define CHAN2G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_2GHZ, \ + .band = NL80211_BAND_2GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ @@ -2698,7 +9541,7 @@ static const struct ieee80211_ops ath10k_ops = { } #define CHAN5G(_channel, _freq, _flags) { \ - .band = IEEE80211_BAND_5GHZ, \ + .band = NL80211_BAND_5GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ @@ -2743,166 +9586,264 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = { CHAN5G(132, 5660, 0), CHAN5G(136, 5680, 0), CHAN5G(140, 5700, 0), + CHAN5G(144, 5720, 0), CHAN5G(149, 5745, 0), CHAN5G(153, 5765, 0), CHAN5G(157, 5785, 0), CHAN5G(161, 5805, 0), CHAN5G(165, 5825, 0), + CHAN5G(169, 5845, 0), + CHAN5G(173, 5865, 0), + /* If you add more, you may need to change ATH10K_MAX_5G_CHAN */ + /* And you will definitely need to change ATH10K_NUM_CHANS in core.h */ }; -static struct ieee80211_rate ath10k_rates[] = { - /* CCK */ - RATETAB_ENT(10, 0x82, 0), - RATETAB_ENT(20, 0x84, 0), - RATETAB_ENT(55, 0x8b, 0), - RATETAB_ENT(110, 0x96, 0), - /* OFDM */ - RATETAB_ENT(60, 0x0c, 0), - RATETAB_ENT(90, 0x12, 0), - RATETAB_ENT(120, 0x18, 0), - RATETAB_ENT(180, 0x24, 0), - RATETAB_ENT(240, 0x30, 0), - RATETAB_ENT(360, 0x48, 0), - RATETAB_ENT(480, 0x60, 0), - RATETAB_ENT(540, 0x6c, 0), -}; - -#define ath10k_a_rates (ath10k_rates + 4) -#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4) -#define ath10k_g_rates (ath10k_rates + 0) -#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) - -struct ath10k *ath10k_mac_create(void) +struct ath10k *ath10k_mac_create(size_t priv_size) { struct ieee80211_hw *hw; + struct ieee80211_ops *ops; struct ath10k *ar; - hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops); - if (!hw) + ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); + if (!ops) return NULL; + hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); + if (!hw) { + kfree(ops); + return NULL; + } + ar = hw->priv; ar->hw = hw; + ar->ops = ops; return ar; } void ath10k_mac_destroy(struct ath10k *ar) { + struct ieee80211_ops *ops = ar->ops; + ieee80211_free_hw(ar->hw); + kfree(ops); } static const struct ieee80211_iface_limit ath10k_if_limits[] = { { - .max = 8, - .types = BIT(NL80211_IFTYPE_STATION) - | BIT(NL80211_IFTYPE_P2P_CLIENT) - | BIT(NL80211_IFTYPE_P2P_GO) - | BIT(NL80211_IFTYPE_AP) - } + .max = 8, + .types = BIT(NL80211_IFTYPE_STATION) + | BIT(NL80211_IFTYPE_P2P_CLIENT) + }, + { + .max = 3, + .types = BIT(NL80211_IFTYPE_P2P_GO) + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE) + }, + { + .max = 7, + .types = BIT(NL80211_IFTYPE_AP) +#ifdef CONFIG_MAC80211_MESH + | BIT(NL80211_IFTYPE_MESH_POINT) +#endif + }, }; -static const struct ieee80211_iface_combination ath10k_if_comb = { - .limits = ath10k_if_limits, - .n_limits = ARRAY_SIZE(ath10k_if_limits), - .max_interfaces = 8, - .num_different_channels = 1, - .beacon_int_infra_match = true, +static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = { + { + .max = 8, + .types = BIT(NL80211_IFTYPE_AP) +#ifdef CONFIG_MAC80211_MESH + | BIT(NL80211_IFTYPE_MESH_POINT) +#endif + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION) + }, }; -static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) -{ - struct ieee80211_sta_vht_cap vht_cap = {0}; - u16 mcs_map; - - vht_cap.vht_supported = 1; - vht_cap.cap = ar->vht_cap_info; - - /* FIXME: check dynamically how many streams board supports */ - mcs_map = 
IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; - - vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); - vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); - - return vht_cap; -} - -static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) -{ - int i; - struct ieee80211_sta_ht_cap ht_cap = {0}; - - if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) - return ht_cap; - - ht_cap.ht_supported = 1; - ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; - ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; - ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; - ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; - - if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) - ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; - - if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) - ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; - - if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { - u32 smps; - - smps = WLAN_HT_CAP_SM_PS_DYNAMIC; - smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; - - ht_cap.cap |= smps; - } - - if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC) - ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; - - if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { - u32 stbc; +static const struct ieee80211_iface_combination ath10k_if_comb[] = { + { + .limits = ath10k_if_limits, + .n_limits = ARRAY_SIZE(ath10k_if_limits), + .max_interfaces = 8, + .num_different_channels = 1, + .beacon_int_infra_match = true, + }, +}; - stbc = ar->ht_cap_info; - stbc &= WMI_HT_CAP_RX_STBC; - stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; - stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; - stbc &= IEEE80211_HT_CAP_RX_STBC; +static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { + { + .limits = ath10k_10x_if_limits, + .n_limits = ARRAY_SIZE(ath10k_10x_if_limits), + .max_interfaces = 8, + .num_different_channels = 1, + .beacon_int_infra_match = true, + .beacon_int_min_gcd = 1, +#ifdef CONFIG_ATH10K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), +#endif + }, +}; - ht_cap.cap |= stbc; - } +static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 2, + .types = BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +}; - if (ar->ht_cap_info & WMI_HT_CAP_LDPC) - ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; +static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 2, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_P2P_GO), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +}; - if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) - ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; +static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = 
BIT(NL80211_IFTYPE_ADHOC), + }, +}; - /* max AMSDU is implicitly taken from vht_cap_info */ - if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) - ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; +/* FIXME: This is not thoroughly tested. These combinations may over- or + * underestimate hw/fw capabilities. + */ +static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { + { + .limits = ath10k_tlv_if_limit, + .num_different_channels = 1, + .max_interfaces = 4, + .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), + }, + { + .limits = ath10k_tlv_if_limit_ibss, + .num_different_channels = 1, + .max_interfaces = 2, + .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), + }, +}; - for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++) - ht_cap.mcs.rx_mask[i] = 0xFF; +static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { + { + .limits = ath10k_tlv_if_limit, + .num_different_channels = 1, + .max_interfaces = 4, + .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), + }, + { + .limits = ath10k_tlv_qcs_if_limit, + .num_different_channels = 2, + .max_interfaces = 4, + .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit), + }, + { + .limits = ath10k_tlv_if_limit_ibss, + .num_different_channels = 1, + .max_interfaces = 2, + .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), + }, +}; - ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; +static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 16, + .types = BIT(NL80211_IFTYPE_AP) +#ifdef CONFIG_MAC80211_MESH + | BIT(NL80211_IFTYPE_MESH_POINT) +#endif + }, +}; - return ht_cap; -} +static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { + { + .limits = ath10k_10_4_if_limits, + .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), + .max_interfaces = 16, + .num_different_channels = 1, + .beacon_int_infra_match = true, + .beacon_int_min_gcd = 1, +#ifdef CONFIG_ATH10K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_80P80) | + BIT(NL80211_CHAN_WIDTH_160), +#endif + }, +}; +static const struct +ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = { + { + .limits = ath10k_10_4_if_limits, + .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), + .max_interfaces = 16, + .num_different_channels = 1, + .beacon_int_infra_match = true, + .beacon_int_min_gcd = 100, +#ifdef CONFIG_ATH10K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_80P80) | + BIT(NL80211_CHAN_WIDTH_160), +#endif + }, +}; static void ath10k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath10k_vif_iter *arvif_iter = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; if (arvif->vdev_id == arvif_iter->vdev_id) arvif_iter->arvif = arvif; @@ -2911,56 +9852,186 @@ static void ath10k_get_arvif_iter(void *data, u8 *mac, struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) { struct ath10k_vif_iter arvif_iter; - u32 flags; memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); arvif_iter.vdev_id = vdev_id; - flags = IEEE80211_IFACE_ITER_RESUME_ALL; ieee80211_iterate_active_interfaces_atomic(ar->hw, - flags, + ATH10K_ITER_RESUME_FLAGS, ath10k_get_arvif_iter, &arvif_iter); if (!arvif_iter.arvif) { - ath10k_warn("No VIF found for VDEV: %d\n", 
vdev_id); + ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); return NULL; } return arvif_iter.arvif; } +#define WRD_METHOD "WRDD" +#define WRDD_WIFI (0x07) + +static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd) +{ + union acpi_object *mcc_pkg; + union acpi_object *domain_type; + union acpi_object *mcc_value; + u32 i; + + if (wrdd->type != ACPI_TYPE_PACKAGE || + wrdd->package.count < 2 || + wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || + wrdd->package.elements[0].integer.value != 0) { + ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n"); + return 0; + } + + for (i = 1; i < wrdd->package.count; ++i) { + mcc_pkg = &wrdd->package.elements[i]; + + if (mcc_pkg->type != ACPI_TYPE_PACKAGE) + continue; + if (mcc_pkg->package.count < 2) + continue; + if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || + mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) + continue; + + domain_type = &mcc_pkg->package.elements[0]; + if (domain_type->integer.value != WRDD_WIFI) + continue; + + mcc_value = &mcc_pkg->package.elements[1]; + return mcc_value->integer.value; + } + return 0; +} + +static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd) +{ + acpi_handle root_handle; + acpi_handle handle; + struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + u32 alpha2_code; + char alpha2[3]; + + root_handle = ACPI_HANDLE(ar->dev); + if (!root_handle) + return -EOPNOTSUPP; + + status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle); + if (ACPI_FAILURE(status)) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "failed to get wrd method %d\n", status); + return -EIO; + } + + status = acpi_evaluate_object(handle, NULL, NULL, &wrdd); + if (ACPI_FAILURE(status)) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "failed to call wrdc %d\n", status); + return -EIO; + } + + alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer); + kfree(wrdd.pointer); + if (!alpha2_code) + return -EIO; + + alpha2[0] = (alpha2_code >> 8) & 0xff; + alpha2[1] = (alpha2_code >> 0) & 0xff; + alpha2[2] = '\0'; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "regulatory hint from WRDD (alpha2-code): %s\n", alpha2); + + *rd = ath_regd_find_country_by_name(alpha2); + if (*rd == 0xffff) + return -EIO; + + *rd |= COUNTRY_ERD_FLAG; + return 0; +} + +static int ath10k_mac_init_rd(struct ath10k *ar) +{ + int ret; + u16 rd; + + ret = ath10k_mac_get_wrdd_regulatory(ar, &rd); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "fallback to eeprom programmed regulatory settings\n"); + rd = ar->hw_eeprom_rd; + } + + ar->ath_common.regulatory.current_rd = rd; + return 0; +} + int ath10k_mac_register(struct ath10k *ar) { + static const u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + + /* Do not add hardware supported ciphers before this line. + * Allow software encryption for all chips. Don't forget to + * update n_cipher_suites below. + */ + WLAN_CIPHER_SUITE_AES_CMAC, + WLAN_CIPHER_SUITE_BIP_CMAC_256, + WLAN_CIPHER_SUITE_BIP_GMAC_128, + WLAN_CIPHER_SUITE_BIP_GMAC_256, + + /* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256 + * and CCMP-256 in hardware. 
+ */ + WLAN_CIPHER_SUITE_GCMP, + WLAN_CIPHER_SUITE_GCMP_256, + WLAN_CIPHER_SUITE_CCMP_256, + }; struct ieee80211_supported_band *band; - struct ieee80211_sta_vht_cap vht_cap; - struct ieee80211_sta_ht_cap ht_cap; void *channels; int ret; + if (!is_valid_ether_addr(ar->mac_addr)) { + ath10k_warn(ar, "invalid MAC address; choosing random\n"); + eth_random_addr(ar->mac_addr); + } SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); SET_IEEE80211_DEV(ar->hw, ar->dev); - ht_cap = ath10k_get_ht_cap(ar); - vht_cap = ath10k_create_vht_cap(ar); + BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) + + ARRAY_SIZE(ath10k_5ghz_channels)) != + ATH10K_NUM_CHANS); if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { channels = kmemdup(ath10k_2ghz_channels, sizeof(ath10k_2ghz_channels), GFP_KERNEL); - if (!channels) - return -ENOMEM; + if (!channels) { + ret = -ENOMEM; + goto err_free; + } - band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; + band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); band->channels = channels; - band->n_bitrates = ath10k_g_rates_size; - band->bitrates = ath10k_g_rates; - band->ht_cap = ht_cap; - /* vht is not supported in 2.4 GHz */ + if (ar->hw_params.cck_rate_map_rev2) { + band->n_bitrates = ath10k_g_rates_rev2_size; + band->bitrates = ath10k_g_rates_rev2; + } else { + band->n_bitrates = ath10k_g_rates_size; + band->bitrates = ath10k_g_rates; + } - ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; + ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; } if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { @@ -2968,93 +10039,332 @@ int ath10k_mac_register(struct ath10k *ar) sizeof(ath10k_5ghz_channels), GFP_KERNEL); if (!channels) { - if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { - band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; - kfree(band->channels); - } - return -ENOMEM; + ret = -ENOMEM; + goto err_free; } - band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; + band = &ar->mac.sbands[NL80211_BAND_5GHZ]; band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels); band->channels = channels; band->n_bitrates = ath10k_a_rates_size; band->bitrates = ath10k_a_rates; - band->ht_cap = ht_cap; - band->vht_cap = vht_cap; - ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band; + ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; } + wiphy_read_of_freq_limits(ar->hw->wiphy); + ath10k_mac_setup_ht_vht_cap(ar); + ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO); - - ar->hw->flags = IEEE80211_HW_SIGNAL_DBM | - IEEE80211_HW_SUPPORTS_PS | - IEEE80211_HW_SUPPORTS_DYNAMIC_PS | - IEEE80211_HW_SUPPORTS_UAPSD | - IEEE80211_HW_MFP_CAPABLE | - IEEE80211_HW_REPORTS_TX_ACK_STATUS | - IEEE80211_HW_HAS_RATE_CONTROL | - IEEE80211_HW_SUPPORTS_STATIC_SMPS | - IEEE80211_HW_WANT_MONITOR_VIF | - IEEE80211_HW_AP_LINK_PS; + BIT(NL80211_IFTYPE_MESH_POINT); + + ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask; + ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask; + + if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features)) + ar->hw->wiphy->interface_modes |= + BIT(NL80211_IFTYPE_P2P_DEVICE) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); + + ieee80211_hw_set(ar->hw, SIGNAL_DBM); + + if (!test_bit(ATH10K_FW_FEATURE_NO_PS, + ar->running_fw->fw_file.fw_features)) { + ieee80211_hw_set(ar->hw, SUPPORTS_PS); + ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); + } + + ieee80211_hw_set(ar->hw, MFP_CAPABLE); + 
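
The hunk above also shows the migration from the old single-bitmask `ar->hw->flags` assignment to per-flag `ieee80211_hw_set()` calls; mac80211 moved its hardware capability flags into a bitmap once they outgrew a single u32. A minimal illustrative sketch of the two styles (editorially added for contrast, not part of the patch itself):

	/* old style: all capability flags OR'd into one 32-bit field */
	ar->hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_MFP_CAPABLE;

	/* new style: ieee80211_hw_set(hw, X) sets bit IEEE80211_HW_X in the
	 * hw->flags bitmap, one flag at a time
	 */
	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
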
ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); + ieee80211_hw_set(ar->hw, AP_LINK_PS); + ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); + ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); + ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); + ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); + ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF); + ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); + ieee80211_hw_set(ar->hw, QUEUE_CONTROL); + ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG); + ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK); + + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) + ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); + + ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; + ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) - ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS; + ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) { - ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; - ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW; + ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION); + ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW); } ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; + if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) { + ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; + ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; + ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS; + ar->hw->wiphy->max_sched_scan_plan_interval = + WMI_PNO_MAX_SCHED_SCAN_PLAN_INT; + ar->hw->wiphy->max_sched_scan_plan_iterations = + WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS; + ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR; + } + ar->hw->vif_data_size = sizeof(struct ath10k_vif); + ar->hw->sta_data_size = sizeof(struct ath10k_sta); + ar->hw->txq_data_size = sizeof(struct ath10k_txq); - ar->hw->channel_change_time = 5000; ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; + if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) { + ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + + /* Firmware delivers WPS/P2P Probe Requests frames to driver so + * that userspace (e.g. wpa_supplicant/hostapd) can generate + * correct Probe Responses. This is more of a hack advert.. 
+ */ + ar->hw->wiphy->probe_resp_offload |= + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; + } + + if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) || + test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) { + ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map)) + ieee80211_hw_set(ar->hw, TDLS_WIDER_BW); + } + + if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map)) + ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA); + + if (ath10k_frame_mode == ATH10K_HW_TXRX_ETHERNET) { + if (ar->wmi.vdev_param->tx_encap_type != + WMI_VDEV_PARAM_UNSUPPORTED) + ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD); + } + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; ar->hw->wiphy->max_remain_on_channel_duration = 5000; ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | + NL80211_FEATURE_AP_SCAN; + + ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; + + ret = ath10k_wow_init(ar); + if (ret) { + ath10k_warn(ar, "failed to init wow: %d\n", ret); + goto err_free; + } + + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_SET_SCAN_DWELL); + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_AQL); + + if (ar->hw_params.mcast_frame_registration) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS); + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) || + test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map)) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); + + if (ath10k_peer_stats_enabled(ar) || + test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); + + if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); + + if (test_bit(WMI_SERVICE_TX_PWR_PER_PEER, ar->wmi.svc_map)) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_STA_TX_PWR); + + if (test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map)) { + ar->hw->wiphy->tid_config_support.vif |= + BIT(NL80211_TID_CONFIG_ATTR_NOACK) | + BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT) | + BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG) | + BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL) | + BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) | + BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE); + + if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, + ar->wmi.svc_map)) { + ar->hw->wiphy->tid_config_support.vif |= + BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL); + } + + ar->hw->wiphy->tid_config_support.peer = + ar->hw->wiphy->tid_config_support.vif; + ar->hw->wiphy->max_data_retry_count = ATH10K_MAX_RETRY_COUNT; + } else { + ar->ops->set_tid_config = NULL; + } /* * on LL hardware queues are managed entirely by the FW * so we only advertise to mac we can do the queues thing */ - ar->hw->queues = 4; + ar->hw->queues = IEEE80211_MAX_QUEUES; + + /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is + * something that vdev_ids can't reach so that we don't stop the queue + * accidentally. 
+ */ + ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1; + + switch (ar->running_fw->fw_file.wmi_op_version) { + case ATH10K_FW_WMI_OP_VERSION_MAIN: + ar->hw->wiphy->iface_combinations = ath10k_if_comb; + ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_if_comb); + ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + break; + case ATH10K_FW_WMI_OP_VERSION_TLV: + if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { + ar->hw->wiphy->iface_combinations = + ath10k_tlv_qcs_if_comb; + ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_tlv_qcs_if_comb); + } else { + ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb; + ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_tlv_if_comb); + } + ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + break; + case ATH10K_FW_WMI_OP_VERSION_10_1: + case ATH10K_FW_WMI_OP_VERSION_10_2: + case ATH10K_FW_WMI_OP_VERSION_10_2_4: + ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb; + ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_10x_if_comb); + break; + case ATH10K_FW_WMI_OP_VERSION_10_4: + ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb; + ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_10_4_if_comb); + if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, + ar->wmi.svc_map)) { + ar->hw->wiphy->iface_combinations = + ath10k_10_4_bcn_int_if_comb; + ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb); + } + break; + case ATH10K_FW_WMI_OP_VERSION_UNSET: + case ATH10K_FW_WMI_OP_VERSION_MAX: + WARN_ON(1); + ret = -EINVAL; + goto err_free; + } - ar->hw->wiphy->iface_combinations = &ath10k_if_comb; - ar->hw->wiphy->n_iface_combinations = 1; + if (ar->hw_params.dynamic_sar_support) + ar->hw->wiphy->sar_capa = &ath10k_sar_capa; + + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) + ar->hw->netdev_features = NETIF_F_HW_CSUM; + + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { + /* Init ath dfs pattern detector */ + ar->ath_common.debug_mask = ATH_DBG_DFS; + ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common, + NL80211_DFS_UNSET); + + if (!ar->dfs_detector) + ath10k_warn(ar, "failed to initialise DFS pattern detector\n"); + } + + ret = ath10k_mac_init_rd(ar); + if (ret) { + ath10k_err(ar, "failed to derive regdom: %d\n", ret); + goto err_dfs_detector_exit; + } + + /* Disable set_coverage_class for chipsets that do not support it. */ + if (!ar->hw_params.hw_ops->set_coverage_class) + ar->ops->set_coverage_class = NULL; ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, ath10k_reg_notifier); if (ret) { - ath10k_err("Regulatory initialization failed\n"); - return ret; + ath10k_err(ar, "failed to initialise regulatory: %i\n", ret); + goto err_dfs_detector_exit; + } + + if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { + ar->hw->wiphy->features |= + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + } + + ar->hw->wiphy->cipher_suites = cipher_suites; + + /* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128 + * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported + * from chip specific hw_param table. 
+ */ + if (!ar->hw_params.n_cipher_suites || + ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) { + ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n", + ar->hw_params.n_cipher_suites); + ar->hw_params.n_cipher_suites = 8; } + ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites; + + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + + ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER; ret = ieee80211_register_hw(ar->hw); if (ret) { - ath10k_err("ieee80211 registration failed: %d\n", ret); - return ret; + ath10k_err(ar, "failed to register ieee80211: %d\n", ret); + goto err_dfs_detector_exit; } - if (!ath_is_world_regd(&ar->ath_common.regulatory)) { + if (test_bit(WMI_SERVICE_PER_PACKET_SW_ENCRYPT, ar->wmi.svc_map)) { + ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); + ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN); + } + + if (!ath_is_world_regd(&ar->ath_common.reg_world_copy) && + !ath_is_world_regd(&ar->ath_common.regulatory)) { ret = regulatory_hint(ar->hw->wiphy, ar->ath_common.regulatory.alpha2); if (ret) - goto exit; + goto err_unregister; } return 0; -exit: + +err_unregister: ieee80211_unregister_hw(ar->hw); + +err_dfs_detector_exit: + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) + ar->dfs_detector->exit(ar->dfs_detector); + +err_free: + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); + + SET_IEEE80211_DEV(ar->hw, NULL); return ret; } @@ -3062,8 +10372,11 @@ void ath10k_mac_unregister(struct ath10k *ar) { ieee80211_unregister_hw(ar->hw); - kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); - kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) + ar->dfs_detector->exit(ar->dfs_detector); + + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); SET_IEEE80211_DEV(ar->hw, NULL); } diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 27fc92e58829..98d83a26ea60 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 
*/ #ifndef _MAC_H_ @@ -21,31 +10,76 @@ #include <net/mac80211.h> #include "core.h" +#define WEP_KEYID_SHIFT 6 + +enum wmi_tlv_tx_pause_id; +enum wmi_tlv_tx_pause_action; + struct ath10k_generic_iter { struct ath10k *ar; int ret; }; -struct ath10k *ath10k_mac_create(void); +struct rfc1042_hdr { + u8 llc_dsap; + u8 llc_ssap; + u8 llc_ctrl; + u8 snap_oui[3]; + __be16 snap_type; +} __packed; + +struct ath10k *ath10k_mac_create(size_t priv_size); void ath10k_mac_destroy(struct ath10k *ar); int ath10k_mac_register(struct ath10k *ar); void ath10k_mac_unregister(struct ath10k *ar); struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id); -void ath10k_reset_scan(unsigned long ptr); +void __ath10k_scan_finish(struct ath10k *ar); +void ath10k_scan_finish(struct ath10k *ar); +void ath10k_scan_timeout_work(struct work_struct *work); void ath10k_offchan_tx_purge(struct ath10k *ar); void ath10k_offchan_tx_work(struct work_struct *work); +void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar); +void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work); +void ath10k_halt(struct ath10k *ar); +void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif); +void ath10k_drain_tx(struct ath10k *ar); +bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, + u8 keyidx); +int ath10k_mac_vif_chan(struct ieee80211_vif *vif, + struct cfg80211_chan_def *def); -static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) -{ - return (struct ath10k_vif *)vif->drv_priv; -} +void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb); +void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id); +void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, + enum wmi_tlv_tx_pause_id pause_id, + enum wmi_tlv_tx_pause_action action); + +u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, + u8 hw_rate, bool cck); +u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, + u32 bitrate); + +void ath10k_mac_tx_lock(struct ath10k *ar, int reason); +void ath10k_mac_tx_unlock(struct ath10k *ar, int reason); +void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason); +void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason); +bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar); +void ath10k_mac_tx_push_pending(struct ath10k *ar); +int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); +struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, + u16 peer_id, + u8 tid); +int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val); +void ath10k_mac_wait_tx_complete(struct ath10k *ar); +int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable); -static inline void ath10k_tx_h_seq_no(struct sk_buff *skb) +static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif, + struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_vif *vif = info->control.vif; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { if (arvif->tx_seq_no == 0) diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c new file mode 100644 index 000000000000..517b30f56b72 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/p2p.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2015 Qualcomm Atheros, Inc. 
+ */ + +#include "core.h" +#include "wmi.h" +#include "mac.h" +#include "p2p.h" + +static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len, + const struct wmi_p2p_noa_info *noa) +{ + struct ieee80211_p2p_noa_attr *noa_attr; + u8 ctwindow_oppps = noa->ctwindow_oppps; + u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET; + bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT); + __le16 *noa_attr_len; + u16 attr_len; + u8 noa_descriptors = noa->num_descriptors; + int i; + + /* P2P IE */ + data[0] = WLAN_EID_VENDOR_SPECIFIC; + data[1] = len - 2; + data[2] = (WLAN_OUI_WFA >> 16) & 0xff; + data[3] = (WLAN_OUI_WFA >> 8) & 0xff; + data[4] = (WLAN_OUI_WFA >> 0) & 0xff; + data[5] = WLAN_OUI_TYPE_WFA_P2P; + + /* NOA ATTR */ + data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE; + noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */ + noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9]; + + noa_attr->index = noa->index; + noa_attr->oppps_ctwindow = ctwindow; + if (oppps) + noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT; + + for (i = 0; i < noa_descriptors; i++) { + noa_attr->desc[i].count = + __le32_to_cpu(noa->descriptors[i].type_count); + noa_attr->desc[i].duration = noa->descriptors[i].duration; + noa_attr->desc[i].interval = noa->descriptors[i].interval; + noa_attr->desc[i].start_time = noa->descriptors[i].start_time; + } + + attr_len = 2; /* index + oppps_ctwindow */ + attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc); + *noa_attr_len = __cpu_to_le16(attr_len); +} + +static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa) +{ + size_t len = 0; + + if (!noa->num_descriptors && + !(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT)) + return 0; + + len += 1 + 1 + 4; /* EID + len + OUI */ + len += 1 + 2; /* noa attr + attr len */ + len += 1 + 1; /* index + oppps_ctwindow */ + len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc); + + return len; +} + +static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie, + size_t len) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->data_lock); + + kfree(arvif->u.ap.noa_data); + + arvif->u.ap.noa_data = ie; + arvif->u.ap.noa_len = len; +} + +static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif, + const struct wmi_p2p_noa_info *noa) +{ + struct ath10k *ar = arvif->ar; + void *ie; + size_t len; + + lockdep_assert_held(&ar->data_lock); + + ath10k_p2p_noa_ie_assign(arvif, NULL, 0); + + len = ath10k_p2p_noa_ie_len_compute(noa); + if (!len) + return; + + ie = kmalloc(len, GFP_ATOMIC); + if (!ie) + return; + + ath10k_p2p_noa_ie_fill(ie, len, noa); + ath10k_p2p_noa_ie_assign(arvif, ie, len); +} + +void ath10k_p2p_noa_update(struct ath10k_vif *arvif, + const struct wmi_p2p_noa_info *noa) +{ + struct ath10k *ar = arvif->ar; + + spin_lock_bh(&ar->data_lock); + __ath10k_p2p_noa_update(arvif, noa); + spin_unlock_bh(&ar->data_lock); +} + +struct ath10k_p2p_noa_arg { + u32 vdev_id; + const struct wmi_p2p_noa_info *noa; +}; + +static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_p2p_noa_arg *arg = data; + + if (arvif->vdev_id != arg->vdev_id) + return; + + ath10k_p2p_noa_update(arvif, arg->noa); +} + +void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id, + const struct wmi_p2p_noa_info *noa) +{ + struct ath10k_p2p_noa_arg arg = { + .vdev_id = vdev_id, + .noa = noa, + }; + + ieee80211_iterate_active_interfaces_atomic(ar->hw, + 
ATH10K_ITER_NORMAL_FLAGS,
+						   ath10k_p2p_noa_update_vdev_iter,
+						   &arg);
+}
diff --git a/drivers/net/wireless/ath/ath10k/p2p.h b/drivers/net/wireless/ath/ath10k/p2p.h
new file mode 100644
index 000000000000..7d7f44809fbb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ */
+
+#ifndef _P2P_H
+#define _P2P_H
+
+struct ath10k_vif;
+struct wmi_p2p_noa_info;
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+			   const struct wmi_p2p_noa_info *noa);
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+				      const struct wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 33af4672c909..97b49bf4ad80 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1,27 +1,20 @@
+// SPDX-License-Identifier: ISC
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */
 
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/bitops.h>
 
 #include "core.h"
 #include "debug.h"
+#include "coredump.h"
 
 #include "targaddrs.h"
 #include "bmi.h"
@@ -32,71 +25,878 @@
 #include "ce.h"
 #include "pci.h"
 
-unsigned int ath10k_target_ps;
-module_param(ath10k_target_ps, uint, 0644);
-MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+enum ath10k_pci_reset_mode {
+	ATH10K_PCI_RESET_AUTO = 0,
+	ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
+
+module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
+MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long to wait for the target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+
+/* Maximum number of bytes that can be handled atomically by
+ * diag read and write.
+ */ +#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000 -#define QCA988X_1_0_DEVICE_ID (0xabcd) -#define QCA988X_2_0_DEVICE_ID (0x003c) +#define QCA99X0_PCIE_BAR0_START_REG 0x81030 +#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c +#define QCA99X0_CPU_MEM_DATA_REG 0x4d010 + +static const struct pci_device_id ath10k_pci_id_table[] = { + /* PCI-E QCA988X V2 (Ubiquiti branded) */ + { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) }, -static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { - { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */ { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ - {0} + { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ + { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ + { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */ + { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */ + { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */ + { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */ + { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */ + {} }; -static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, - u32 *data); - -static void ath10k_pci_process_ce(struct ath10k *ar); -static int ath10k_pci_post_rx(struct ath10k *ar); -static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, - int num); -static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info); -static void ath10k_pci_stop_ce(struct ath10k *ar); - -static const struct ce_attr host_ce_config_wlan[] = { - /* host->target HTC control and raw streams */ - { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,}, - /* could be moved to share CE3 */ - /* target->host HTT + HTC control */ - { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,}, - /* target->host WMI */ - { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, - /* host->target WMI */ - { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, - /* host->target HTT */ - { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0, - CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, - /* unused */ - { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, - /* Target autonomous hif_memcpy */ - { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, - /* ce_diag, the Diagnostic Window */ - { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, +static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { + /* QCA988X pre 2.0 chips are not supported because they need some nasty + * hacks. ath10k doesn't have them and these devices crash horribly + * because of that. 
+ */ + { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV }, + { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, + + { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, + { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, + { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, + { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, + { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, + + { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, + { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, + { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, + { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, + { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, + + { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, + + { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV }, + + { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV }, + + { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, + { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, + + { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV }, +}; + +static void ath10k_pci_buffer_cleanup(struct ath10k *ar); +static int ath10k_pci_cold_reset(struct ath10k *ar); +static int ath10k_pci_safe_chip_reset(struct ath10k *ar); +static int ath10k_pci_init_irq(struct ath10k *ar); +static int ath10k_pci_deinit_irq(struct ath10k *ar); +static int ath10k_pci_request_irq(struct ath10k *ar); +static void ath10k_pci_free_irq(struct ath10k *ar); +static int ath10k_pci_bmi_wait(struct ath10k *ar, + struct ath10k_ce_pipe *tx_pipe, + struct ath10k_ce_pipe *rx_pipe, + struct bmi_xfer *xfer); +static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); +static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); + +static const struct ce_attr pci_host_ce_config_wlan[] = { + /* CE0: host->target HTC control and raw streams */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 256, + .dest_nentries = 0, + .send_cb = ath10k_pci_htc_tx_cb, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_pci_htt_htc_rx_cb, + }, + + /* CE2: target->host WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 128, + .recv_cb = ath10k_pci_htc_rx_cb, + }, + + /* CE3: host->target WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + .send_cb = ath10k_pci_htc_tx_cb, + }, + + /* CE4: host->target HTT */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, + .src_sz_max = 256, + .dest_nentries = 0, + .send_cb = ath10k_pci_htt_tx_cb, + }, + + /* CE5: target->host HTT (HIF->HTT) */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 512, + .dest_nentries = 512, + .recv_cb = ath10k_pci_htt_rx_cb, + }, + + /* CE6: target autonomous hif_memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE7: ce_diag, the Diagnostic Window */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_POLL, + .src_nentries = 2, + .src_sz_max = DIAG_TRANSFER_LIMIT, + .dest_nentries = 2, + }, + + /* 
CE8: target->host pktlog */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 128, + .recv_cb = ath10k_pci_pktlog_rx_cb, + }, + + /* CE9 target autonomous qcache memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE10: target autonomous hif memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE11: target autonomous hif memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, }; /* Target firmware's Copy Engine configuration. */ -static const struct ce_pipe_config target_ce_config_wlan[] = { - /* host->target HTC control and raw streams */ - { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,}, - /* target->host HTT + HTC control */ - { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,}, - /* target->host WMI */ - { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, - /* host->target WMI */ - { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, - /* host->target HTT */ - { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,}, +static const struct ce_pipe_config pci_target_ce_config_wlan[] = { + /* CE0: host->target HTC control and raw streams */ + { + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE1: target->host HTT + HTC control */ + { + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE2: target->host WMI */ + { + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(64), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE3: host->target WMI */ + { + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE4: host->target HTT */ + { + .pipenum = __cpu_to_le32(4), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + /* NB: 50% of src nentries, since tx has 2 frags */ - /* unused */ - { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, - /* Reserved for target autonomous hif_memcpy */ - { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,}, + + /* CE5: target->host HTT (HIF->HTT) */ + { + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(512), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE6: Reserved for target autonomous hif_memcpy */ + { + .pipenum = __cpu_to_le32(6), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(4096), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + /* CE7 used only by Host */ + { + .pipenum = __cpu_to_le32(7), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(0), + .nbytes_max = __cpu_to_le32(0), + .flags = __cpu_to_le32(0), + 
.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE8: target->host pktlog */
+	{
+		.pipenum = __cpu_to_le32(8),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(64),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE9: target autonomous qcache memcpy */
+	{
+		.pipenum = __cpu_to_le32(9),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* It is not necessary to send a target wlan configuration for CE10
+	 * and CE11 as these CEs are not actively used in the target.
+	 */
 };
 
 /*
+ * Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(0),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(1),
+	},
+	{ /* not used */
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(0),
+	},
+	{ /* not used */
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(1),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(4),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(5),
+	},
+
+	/* (Additions here) */
+
+	{ /* must be last */
+		__cpu_to_le32(0),
+		__cpu_to_le32(0),
+		__cpu_to_le32(0),
+	},
+};
+
+static bool ath10k_pci_is_awake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+			   RTC_STATE_ADDRESS);
+
+	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
+}
+
+static void __ath10k_pci_wake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	lockdep_assert_held(&ar_pci->ps_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	iowrite32(PCIE_SOC_WAKE_V_MASK,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+}
+
+static void __ath10k_pci_sleep(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	lockdep_assert_held(&ar_pci->ps_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	iowrite32(PCIE_SOC_WAKE_RESET,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+	ar_pci->ps_awake = false;
+}
+
+static int ath10k_pci_wake_wait(struct ath10k *ar)
+{
+	int tot_delay = 0;
+	int curr_delay = 5;
+
+	while (tot_delay < PCIE_WAKE_TIMEOUT) {
+		if (ath10k_pci_is_awake(ar)) {
+			if (tot_delay > PCIE_WAKE_LATE_US)
+				ath10k_warn(ar, "device wakeup took %d ms, which is unusually long; the device otherwise works normally\n",
+					    tot_delay / 1000);
+			return 0;
+		}
+
+		udelay(curr_delay);
+		tot_delay += curr_delay;
+
+		if (curr_delay < 50)
+			curr_delay += 5;
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int ath10k_pci_force_wake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+	int ret = 0;
+
+	if (ar_pci->pci_ps)
+		return ret;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	if (!ar_pci->ps_awake) {
+		iowrite32(PCIE_SOC_WAKE_V_MASK,
+			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+			  PCIE_SOC_WAKE_ADDRESS);
+
+		ret = ath10k_pci_wake_wait(ar);
+		if (ret == 0)
+			ar_pci->ps_awake = true;
+	}
+
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+	return ret;
+}
+
+static void ath10k_pci_force_sleep(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	iowrite32(PCIE_SOC_WAKE_RESET,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+	ar_pci->ps_awake = false;
+
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static int ath10k_pci_wake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+	int ret = 0;
+
+	if (ar_pci->pci_ps == 0)
+		return ret;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	/* This function can be called very frequently. To avoid excessive
+	 * CPU stalls for MMIO reads, use a cached variable to hold the device state.
+ */ + if (!ar_pci->ps_awake) { + __ath10k_pci_wake(ar); + + ret = ath10k_pci_wake_wait(ar); + if (ret == 0) + ar_pci->ps_awake = true; + } + + if (ret == 0) { + ar_pci->ps_wake_refcount++; + WARN_ON(ar_pci->ps_wake_refcount == 0); + } + + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); + + return ret; +} + +static void ath10k_pci_sleep(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + + if (ar_pci->pci_ps == 0) + return; + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + if (WARN_ON(ar_pci->ps_wake_refcount == 0)) + goto skip; + + ar_pci->ps_wake_refcount--; + + mod_timer(&ar_pci->ps_timer, jiffies + + msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC)); + +skip: + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); +} + +static void ath10k_pci_ps_timer(struct timer_list *t) +{ + struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t, ps_timer); + struct ath10k *ar = ar_pci->ar; + unsigned long flags; + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + if (ar_pci->ps_wake_refcount > 0) + goto skip; + + __ath10k_pci_sleep(ar); + +skip: + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); +} + +static void ath10k_pci_sleep_sync(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + + if (ar_pci->pci_ps == 0) { + ath10k_pci_force_sleep(ar); + return; + } + + timer_delete_sync(&ar_pci->ps_timer); + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + WARN_ON(ar_pci->ps_wake_refcount > 0); + __ath10k_pci_sleep(ar); + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); +} + +static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) { + ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", + offset, offset + sizeof(value), ar_pci->mem_len); + return; + } + + ret = ath10k_pci_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n", + value, offset, ret); + return; + } + + iowrite32(value, ar_pci->mem + offset); + ath10k_pci_sleep(ar); +} + +static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + u32 val; + int ret; + + if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) { + ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", + offset, offset + sizeof(val), ar_pci->mem_len); + return 0; + } + + ret = ath10k_pci_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n", + offset, ret); + return 0xffffffff; + } + + val = ioread32(ar_pci->mem + offset); + ath10k_pci_sleep(ar); + + return val; +} + +inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + ce->bus_ops->write32(ar, offset, value); +} + +inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return ce->bus_ops->read32(ar, offset); +} + +u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) +{ + return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); +} + +void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) +{ 
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); +} + +u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) +{ + return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr); +} + +void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) +{ + ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); +} + +bool ath10k_pci_irq_pending(struct ath10k *ar) +{ + u32 cause; + + /* Check if the shared legacy irq is for us */ + cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_CAUSE_ADDRESS); + if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) + return true; + + return false; +} + +void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar) +{ + /* IMPORTANT: INTR_CLR register has to be set after + * INTR_ENABLE is set to 0, otherwise interrupt can not be + * really cleared. + */ + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, + 0); + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + + /* IMPORTANT: this extra read transaction is required to + * flush the posted write buffer. + */ + (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS); +} + +void ath10k_pci_enable_intx_irq(struct ath10k *ar) +{ + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS, + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + + /* IMPORTANT: this extra read transaction is required to + * flush the posted write buffer. + */ + (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS); +} + +static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI) + return "msi"; + + return "legacy"; +} + +static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + struct sk_buff *skb; + dma_addr_t paddr; + int ret; + + skb = dev_alloc_skb(pipe->buf_sz); + if (!skb) + return -ENOMEM; + + WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); + + paddr = dma_map_single(ar->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ar->dev, paddr))) { + ath10k_warn(ar, "failed to dma map pci rx buf\n"); + dev_kfree_skb_any(skb); + return -EIO; + } + + ATH10K_SKB_RXCB(skb)->paddr = paddr; + + spin_lock_bh(&ce->ce_lock); + ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr); + spin_unlock_bh(&ce->ce_lock); + if (ret) { + dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + int ret, num; + + if (pipe->buf_sz == 0) + return; + + if (!ce_pipe->dest_ring) + return; + + spin_lock_bh(&ce->ce_lock); + num = __ath10k_ce_rx_num_free_bufs(ce_pipe); + spin_unlock_bh(&ce->ce_lock); + + while (num >= 0) { + ret = __ath10k_pci_rx_post_buf(pipe); + if (ret) { + if (ret == -ENOSPC) + break; + ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); + mod_timer(&ar_pci->rx_post_retry, jiffies + + ATH10K_PCI_RX_POST_RETRY_MS); + break; + } + num--; + } +} + +void 
ath10k_pci_rx_post(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int i;
+
+	for (i = 0; i < CE_COUNT; i++)
+		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
+}
+
+void ath10k_pci_rx_replenish_retry(struct timer_list *t)
+{
+	struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t,
+						       rx_post_retry);
+	struct ath10k *ar = ar_pci->ar;
+
+	ath10k_pci_rx_post(ar);
+}
+
+static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+	u32 val = 0, region = addr & 0xfffff;
+
+	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+	       & 0x7ff) << 21;
+	val |= 0x100000 | region;
+	return val;
+}
+
+/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr to also support
+ * access to target space below 1M for qca6174 and qca9377.
+ * If the target address is below 1M, bit[20] of the converted CE address
+ * is 0; otherwise bit[20] is 1.
+ */
+static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+	u32 val = 0, region = addr & 0xfffff;
+
+	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+	       & 0x7ff) << 21;
+	val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
+	return val;
+}
+
+static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+	u32 val = 0, region = addr & 0xfffff;
+
+	val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+	val |= 0x100000 | region;
+	return val;
+}
+
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
+		return -EOPNOTSUPP;
+
+	return ar_pci->targ_cpu_to_ce_addr(ar, addr);
+}
+
 /*
  * Diagnostic read/write access is provided for startup/config/debug usage.
  * Caller must guarantee proper alignment, when applicable, and single user
  * at any moment.
@@ -106,36 +906,16 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret = 0;
-	u32 buf;
-	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
-	unsigned int id;
-	unsigned int flags;
-	struct ce_state *ce_diag;
+	u32 *buf;
+	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
+	struct ath10k_ce_pipe *ce_diag;
 	/* Host buffer address in CE space */
 	u32 ce_data;
 	dma_addr_t ce_data_base = 0;
-	void *data_buf = NULL;
+	void *data_buf;
 	int i;
 
-	/*
-	 * This code cannot handle reads to non-memory space.
Redirect to the - * register read fn but preserve the multi word read capability of - * this fn - */ - if (address < DRAM_BASE_ADDRESS) { - if (!IS_ALIGNED(address, 4) || - !IS_ALIGNED((unsigned long)data, 4)) - return -EIO; - - while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access( - ar, address, (u32 *)data)) == 0)) { - nbytes -= sizeof(u32); - address += sizeof(u32); - data += sizeof(u32); - } - return ret; - } - + mutex_lock(&ar_pci->ce_diag_mutex); ce_diag = ar_pci->ce_diag; /* @@ -144,74 +924,57 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, * 1) 4-byte alignment * 2) Buffer in DMA-able space */ - orig_nbytes = nbytes; - data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, - orig_nbytes, - &ce_data_base); + alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); + data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, + GFP_ATOMIC); if (!data_buf) { ret = -ENOMEM; goto done; } - memset(data_buf, 0, orig_nbytes); - remaining_bytes = orig_nbytes; + /* The address supplied by the caller is in the + * Target CPU virtual address space. + * + * In order to use this address with the diagnostic CE, + * convert it from Target CPU virtual address space + * to CE address space + */ + address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); + + remaining_bytes = nbytes; ce_data = ce_data_base; while (remaining_bytes) { nbytes = min_t(unsigned int, remaining_bytes, DIAG_TRANSFER_LIMIT); - ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data); + ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data); if (ret != 0) goto done; /* Request CE to send from Target(!) address to Host buffer */ - /* - * The address supplied by the caller is in the - * Target CPU virtual address space. - * - * In order to use this address with the diagnostic CE, - * convert it from Target CPU virtual address space - * to CE address space - */ - ath10k_pci_wake(ar); - address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, - address); - ath10k_pci_sleep(ar); - - ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, - 0); + ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0); if (ret) goto done; i = 0; - while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id) != 0) { - mdelay(1); - if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) { + udelay(DIAG_ACCESS_CE_WAIT_US); + i += DIAG_ACCESS_CE_WAIT_US; + + if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } } - if (nbytes != completed_nbytes) { - ret = -EIO; - goto done; - } - - if (buf != (u32) address) { - ret = -EIO; - goto done; - } - i = 0; - while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id, &flags) != 0) { - mdelay(1); + while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf, + &completed_nbytes) != 0) { + udelay(DIAG_ACCESS_CE_WAIT_US); + i += DIAG_ACCESS_CE_WAIT_US; - if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } @@ -222,64 +985,81 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, goto done; } - if (buf != ce_data) { + if (*buf != ce_data) { ret = -EIO; goto done; } remaining_bytes -= nbytes; + memcpy(data, data_buf, nbytes); + address += nbytes; - ce_data += nbytes; + data += nbytes; } done: - if (ret == 0) { - /* Copy data from allocated DMA buf to caller's buf */ - WARN_ON_ONCE(orig_nbytes & 3); - for (i = 0; i < orig_nbytes / sizeof(__le32); i++) { - ((u32 
*)data)[i] = - __le32_to_cpu(((__le32 *)data_buf)[i]); - } - } else - ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", - __func__, address); if (data_buf) - pci_free_consistent(ar_pci->pdev, orig_nbytes, - data_buf, ce_data_base); + dma_free_coherent(ar->dev, alloc_nbytes, data_buf, + ce_data_base); + + mutex_unlock(&ar_pci->ce_diag_mutex); return ret; } -/* Read 4-byte aligned data from Target memory or register */ -static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, - u32 *data) +static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) { - /* Assume range doesn't cross this boundary */ - if (address >= DRAM_BASE_ADDRESS) - return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32)); + __le32 val = 0; + int ret; + + ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); + *value = __le32_to_cpu(val); + + return ret; +} + +static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, + u32 src, u32 len) +{ + u32 host_addr, addr; + int ret; + + host_addr = host_interest_item_address(src); + + ret = ath10k_pci_diag_read32(ar, host_addr, &addr); + if (ret != 0) { + ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", + src, ret); + return ret; + } + + ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); + if (ret != 0) { + ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", + addr, len, ret); + return ret; + } - ath10k_pci_wake(ar); - *data = ath10k_pci_read32(ar, address); - ath10k_pci_sleep(ar); return 0; } -static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, - const void *data, int nbytes) +#define ath10k_pci_diag_read_hi(ar, dest, src, len) \ + __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) + +int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + const void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; - u32 buf; - unsigned int completed_nbytes, orig_nbytes, remaining_bytes; - unsigned int id; - unsigned int flags; - struct ce_state *ce_diag; - void *data_buf = NULL; - u32 ce_data; /* Host buffer address in CE space */ + u32 *buf; + unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; + struct ath10k_ce_pipe *ce_diag; + void *data_buf; dma_addr_t ce_data_base = 0; int i; + mutex_lock(&ar_pci->ce_diag_mutex); ce_diag = ar_pci->ce_diag; /* @@ -288,20 +1068,15 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * 1) 4-byte alignment * 2) Buffer in DMA-able space */ - orig_nbytes = nbytes; - data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, - orig_nbytes, - &ce_data_base); + alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); + + data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, + GFP_ATOMIC); if (!data_buf) { ret = -ENOMEM; goto done; } - /* Copy caller's data to allocated DMA buf */ - WARN_ON_ONCE(orig_nbytes & 3); - for (i = 0; i < orig_nbytes / sizeof(__le32); i++) - ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]); - /* * The address supplied by the caller is in the * Target CPU virtual address space. 
@@ -312,18 +1087,18 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * to * CE address space */ - ath10k_pci_wake(ar); - address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); - ath10k_pci_sleep(ar); + address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); - remaining_bytes = orig_nbytes; - ce_data = ce_data_base; + remaining_bytes = nbytes; while (remaining_bytes) { /* FIXME: check cast */ nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); + /* Copy caller's data to allocated DMA buf */ + memcpy(data_buf, data, nbytes); + /* Set up to receive directly into Target(!) address */ - ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address); + ret = ath10k_ce_rx_post_buf(ce_diag, &address, address); if (ret != 0) goto done; @@ -331,40 +1106,28 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * Request CE to send caller-supplied data that * was copied to bounce buffer to Target(!) address. */ - ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data, - nbytes, 0, 0); + ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0); if (ret != 0) goto done; i = 0; - while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id) != 0) { - mdelay(1); + while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) { + udelay(DIAG_ACCESS_CE_WAIT_US); + i += DIAG_ACCESS_CE_WAIT_US; - if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } } - if (nbytes != completed_nbytes) { - ret = -EIO; - goto done; - } - - if (buf != ce_data) { - ret = -EIO; - goto done; - } - i = 0; - while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id, &flags) != 0) { - mdelay(1); + while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf, + &completed_nbytes) != 0) { + udelay(DIAG_ACCESS_CE_WAIT_US); + i += DIAG_ACCESS_CE_WAIT_US; - if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } @@ -375,840 +1138,899 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, goto done; } - if (buf != address) { + if (*buf != address) { ret = -EIO; goto done; } remaining_bytes -= nbytes; address += nbytes; - ce_data += nbytes; + data += nbytes; } done: if (data_buf) { - pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf, - ce_data_base); + dma_free_coherent(ar->dev, alloc_nbytes, data_buf, + ce_data_base); } if (ret != 0) - ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__, - address); + ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", + address, ret); + + mutex_unlock(&ar_pci->ce_diag_mutex); return ret; } -/* Write 4B data to Target memory or register */ -static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address, - u32 data) +static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) { - /* Assume range doesn't cross this boundary */ - if (address >= DRAM_BASE_ADDRESS) - return ath10k_pci_diag_write_mem(ar, address, &data, - sizeof(u32)); + __le32 val = __cpu_to_le32(value); - ath10k_pci_wake(ar); - ath10k_pci_write32(ar, address, data); - ath10k_pci_sleep(ar); - return 0; + return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); } -static bool ath10k_pci_target_is_awake(struct ath10k *ar) +/* Called by lower (CE) layer when a send to Target completes. 
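+ * Completed transfer contexts are first drained into a local list and
+ * only then handed to the HTC tx completion handler; NULL contexts
+ * have no skb associated with them and are skipped.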
*/ +static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state) { - void __iomem *mem = ath10k_pci_priv(ar)->mem; - u32 val; - val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + - RTC_STATE_ADDRESS); - return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON); -} + struct ath10k *ar = ce_state->ar; + struct sk_buff_head list; + struct sk_buff *skb; -static void ath10k_pci_wait(struct ath10k *ar) -{ - int n = 100; + __skb_queue_head_init(&list); + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { + /* no need to call tx completion for NULL pointers */ + if (skb == NULL) + continue; - while (n-- && !ath10k_pci_target_is_awake(ar)) - msleep(10); + __skb_queue_tail(&list, skb); + } - if (n < 0) - ath10k_warn("Unable to wakeup target\n"); + while ((skb = __skb_dequeue(&list))) + ath10k_htc_tx_completion_handler(ar, skb); } -void ath10k_do_pci_wake(struct ath10k *ar) +static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state, + void (*callback)(struct ath10k *ar, + struct sk_buff *skb)) { + struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - void __iomem *pci_addr = ar_pci->mem; - int tot_delay = 0; - int curr_delay = 5; - - if (atomic_read(&ar_pci->keep_awake_count) == 0) { - /* Force AWAKE */ - iowrite32(PCIE_SOC_WAKE_V_MASK, - pci_addr + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); - } - atomic_inc(&ar_pci->keep_awake_count); - - if (ar_pci->verified_awake) - return; + struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; + struct sk_buff *skb; + struct sk_buff_head list; + void *transfer_context; + unsigned int nbytes, max_nbytes; - for (;;) { - if (ath10k_pci_target_is_awake(ar)) { - ar_pci->verified_awake = true; - break; - } + __skb_queue_head_init(&list); + while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, + &nbytes) == 0) { + skb = transfer_context; + max_nbytes = skb->len + skb_tailroom(skb); + dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + max_nbytes, DMA_FROM_DEVICE); - if (tot_delay > PCIE_WAKE_TIMEOUT) { - ath10k_warn("target takes too long to wake up (awake count %d)\n", - atomic_read(&ar_pci->keep_awake_count)); - break; + if (unlikely(max_nbytes < nbytes)) { + ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", + nbytes, max_nbytes); + dev_kfree_skb_any(skb); + continue; } - udelay(curr_delay); - tot_delay += curr_delay; - - if (curr_delay < 50) - curr_delay += 5; + skb_put(skb, nbytes); + __skb_queue_tail(&list, skb); } -} -void ath10k_do_pci_sleep(struct ath10k *ar) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - void __iomem *pci_addr = ar_pci->mem; + while ((skb = __skb_dequeue(&list))) { + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", + ce_state->id, skb->len); + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", + skb->data, skb->len); - if (atomic_dec_and_test(&ar_pci->keep_awake_count)) { - /* Allow sleep */ - ar_pci->verified_awake = false; - iowrite32(PCIE_SOC_WAKE_RESET, - pci_addr + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); + callback(ar, skb); } -} -/* - * FIXME: Handle OOM properly. 
- */ -static inline -struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info) -{ - struct ath10k_pci_compl *compl = NULL; - - spin_lock_bh(&pipe_info->pipe_lock); - if (list_empty(&pipe_info->compl_free)) { - ath10k_warn("Completion buffers are full\n"); - goto exit; - } - compl = list_first_entry(&pipe_info->compl_free, - struct ath10k_pci_compl, list); - list_del(&compl->list); -exit: - spin_unlock_bh(&pipe_info->pipe_lock); - return compl; + ath10k_pci_rx_post_pipe(pipe_info); } -/* Called by lower (CE) layer when a send to Target completes. */ -static void ath10k_pci_ce_send_done(struct ce_state *ce_state, - void *transfer_context, - u32 ce_data, - unsigned int nbytes, - unsigned int transfer_id) +static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state, + void (*callback)(struct ath10k *ar, + struct sk_buff *skb)) { struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; - struct ath10k_pci_compl *compl; - bool process = false; + struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; + struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl; + struct sk_buff *skb; + struct sk_buff_head list; + void *transfer_context; + unsigned int nbytes, max_nbytes, nentries; + int orig_len; + + /* No need to acquire ce_lock for CE5, since this is the only place CE5 + * is processed other than init and deinit. Before releasing CE5 + * buffers, interrupts are disabled. Thus CE5 access is serialized. + */ + __skb_queue_head_init(&list); + while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context, + &nbytes) == 0) { + skb = transfer_context; + max_nbytes = skb->len + skb_tailroom(skb); - do { - /* - * For the send completion of an item in sendlist, just - * increment num_sends_allowed. The upper layer callback will - * be triggered when last fragment is done with send. - */ - if (transfer_context == CE_SENDLIST_ITEM_CTXT) { - spin_lock_bh(&pipe_info->pipe_lock); - pipe_info->num_sends_allowed++; - spin_unlock_bh(&pipe_info->pipe_lock); + if (unlikely(max_nbytes < nbytes)) { + ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", + nbytes, max_nbytes); continue; } - compl = get_free_compl(pipe_info); - if (!compl) - break; + dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + max_nbytes, DMA_FROM_DEVICE); + skb_put(skb, nbytes); + __skb_queue_tail(&list, skb); + } - compl->send_or_recv = HIF_CE_COMPLETE_SEND; - compl->ce_state = ce_state; - compl->pipe_info = pipe_info; - compl->transfer_context = transfer_context; - compl->nbytes = nbytes; - compl->transfer_id = transfer_id; - compl->flags = 0; + nentries = skb_queue_len(&list); + while ((skb = __skb_dequeue(&list))) { + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", + ce_state->id, skb->len); + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", + skb->data, skb->len); - /* - * Add the completion to the processing queue. 
- */ - spin_lock_bh(&ar_pci->compl_lock); - list_add_tail(&compl->list, &ar_pci->compl_process); - spin_unlock_bh(&ar_pci->compl_lock); + orig_len = skb->len; + callback(ar, skb); + skb_push(skb, orig_len - skb->len); + skb_reset_tail_pointer(skb); + skb_trim(skb, 0); - process = true; - } while (ath10k_ce_completed_send_next(ce_state, - &transfer_context, - &ce_data, &nbytes, - &transfer_id) == 0); + /*let device gain the buffer again*/ + dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + } + ath10k_ce_rx_update_write_idx(ce_pipe, nentries); +} - /* - * If only some of the items within a sendlist have completed, - * don't invoke completion processing until the entire sendlist - * has been sent. +/* Called by lower (CE) layer when data is received from the Target. */ +static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + /* CE4 polling needs to be done whenever CE pipe which transports + * HTT Rx (target->host) is processed. */ - if (!process) - return; + ath10k_ce_per_engine_service(ce_state->ar, 4); - ath10k_pci_process_ce(ar); + ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); } -/* Called by lower (CE) layer when data is received from the Target. */ -static void ath10k_pci_ce_recv_data(struct ce_state *ce_state, - void *transfer_context, u32 ce_data, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags) +/* Called by lower (CE) layer when data is received from the Target. + * Only 10.4 firmware uses separate CE to transfer pktlog data. + */ +static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_pci_process_rx_cb(ce_state, + ath10k_htt_rx_pktlog_completion_handler); +} + +/* Called by lower (CE) layer when a send to HTT Target completes. */ +static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; - struct ath10k_pci_compl *compl; struct sk_buff *skb; - do { - compl = get_free_compl(pipe_info); - if (!compl) - break; - - compl->send_or_recv = HIF_CE_COMPLETE_RECV; - compl->ce_state = ce_state; - compl->pipe_info = pipe_info; - compl->transfer_context = transfer_context; - compl->nbytes = nbytes; - compl->transfer_id = transfer_id; - compl->flags = flags; + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { + /* no need to call tx completion for NULL pointers */ + if (!skb) + continue; - skb = transfer_context; dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, - skb->len + skb_tailroom(skb), - DMA_FROM_DEVICE); - /* - * Add the completion to the processing queue. - */ - spin_lock_bh(&ar_pci->compl_lock); - list_add_tail(&compl->list, &ar_pci->compl_process); - spin_unlock_bh(&ar_pci->compl_lock); + skb->len, DMA_TO_DEVICE); + ath10k_htt_hif_tx_complete(ar, skb); + } +} + +static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) +{ + skb_pull(skb, sizeof(struct ath10k_htc_hdr)); + ath10k_htt_t2h_msg_handler(ar, skb); +} - } while (ath10k_ce_completed_recv_next(ce_state, - &transfer_context, - &ce_data, &nbytes, - &transfer_id, - &flags) == 0); +/* Called by lower (CE) layer when HTT data is received from the Target. 
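+ * ath10k_pci_htt_rx_deliver() strips the HTC header and hands the
+ * payload straight to the HTT T2H message handler, bypassing the
+ * generic HTC rx path.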
*/ +static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + /* CE4 polling needs to be done whenever CE pipe which transports + * HTT Rx (target->host) is processed. + */ + ath10k_ce_per_engine_service(ce_state->ar, 4); - ath10k_pci_process_ce(ar); + ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); } -/* Send the first nbytes bytes of the buffer */ -static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, - unsigned int transfer_id, - unsigned int bytes, struct sk_buff *nbuf) +int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) { - struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]); - struct ce_state *ce_hdl = pipe_info->ce_hdl; - struct ce_sendlist sendlist; - unsigned int len; - u32 flags = 0; - int ret; - - memset(&sendlist, 0, sizeof(struct ce_sendlist)); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; + struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; + struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; + int err, i = 0; + + spin_lock_bh(&ce->ce_lock); + + nentries_mask = src_ring->nentries_mask; + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + if (unlikely(CE_RING_DELTA(nentries_mask, + write_index, sw_index - 1) < n_items)) { + err = -ENOBUFS; + goto err; + } + + for (i = 0; i < n_items - 1; i++) { + ath10k_dbg(ar, ATH10K_DBG_PCI, + "pci tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", + items[i].vaddr, items[i].len); + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + CE_SEND_FLAG_GATHER); + if (err) + goto err; + } - len = min(bytes, nbuf->len); - bytes -= len; + /* `i` is equal to `n_items -1` after for() */ - if (len & 3) - ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len); + ath10k_dbg(ar, ATH10K_DBG_PCI, + "pci tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", + items[i].vaddr, items[i].len); - ath10k_dbg(ATH10K_DBG_PCI, - "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n", - nbuf->data, (unsigned long long) skb_cb->paddr, - nbuf->len, len); - ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, - "ath10k tx: data: ", - nbuf->data, nbuf->len); + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + 0); + if (err) + goto err; - ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags); + spin_unlock_bh(&ce->ce_lock); + return 0; - /* Make sure we have resources to handle this request */ - spin_lock_bh(&pipe_info->pipe_lock); - if (!pipe_info->num_sends_allowed) { - ath10k_warn("Pipe: %d is full\n", pipe_id); - spin_unlock_bh(&pipe_info->pipe_lock); - return -ENOSR; - } - pipe_info->num_sends_allowed--; - spin_unlock_bh(&pipe_info->pipe_lock); +err: + for (; i > 0; i--) + __ath10k_ce_send_revert(ce_pipe); - ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); - if (ret) - ath10k_warn("CE send failed: %p\n", nbuf); + spin_unlock_bh(&ce->ce_lock); + return err; +} - return ret; +int 
ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len) +{ + return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); } -static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) +u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]); - int ret; - spin_lock_bh(&pipe_info->pipe_lock); - ret = pipe_info->num_sends_allowed; - spin_unlock_bh(&pipe_info->pipe_lock); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); - return ret; + return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); } -static void ath10k_pci_hif_dump_area(struct ath10k *ar) +static void ath10k_pci_dump_registers(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data) { - u32 reg_dump_area = 0; - u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; - u32 host_addr; - int ret; - u32 i; - - ath10k_err("firmware crashed!\n"); - ath10k_err("hardware name %s version 0x%x\n", - ar->hw_params.name, ar->target_version); - ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major, - ar->fw_version_minor, ar->fw_version_release, - ar->fw_version_build); - - host_addr = host_interest_item_address(HI_ITEM(hi_failure_state)); - if (ath10k_pci_diag_read_mem(ar, host_addr, - ®_dump_area, sizeof(u32)) != 0) { - ath10k_warn("could not read hi_failure_state\n"); - return; - } + __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; + int i, ret; - ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area); + lockdep_assert_held(&ar->dump_mutex); - ret = ath10k_pci_diag_read_mem(ar, reg_dump_area, - ®_dump_values[0], - REG_DUMP_COUNT_QCA988X * sizeof(u32)); - if (ret != 0) { - ath10k_err("could not dump FW Dump Area\n"); + ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0], + hi_failure_state, + REG_DUMP_COUNT_QCA988X * sizeof(__le32)); + if (ret) { + ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); return; } BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); - ath10k_err("target Register Dump\n"); + ath10k_err(ar, "firmware register dump:\n"); for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) - ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", + ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, - reg_dump_values[i], - reg_dump_values[i + 1], - reg_dump_values[i + 2], - reg_dump_values[i + 3]); -} + __le32_to_cpu(reg_dump_values[i]), + __le32_to_cpu(reg_dump_values[i + 1]), + __le32_to_cpu(reg_dump_values[i + 2]), + __le32_to_cpu(reg_dump_values[i + 3])); -static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, - int force) -{ - if (!force) { - int resources; - /* - * Decide whether to actually poll for completions, or just - * wait for a later chance. - * If there seem to be plenty of resources left, then just wait - * since checking involves reading a CE register, which is a - * relatively expensive operation. - */ - resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); + if (!crash_data) + return; - /* - * If at least 50% of the total resources are still available, - * don't bother checking again yet. 
- */ - if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) - return; - } - ath10k_ce_per_engine_service(ar, pipe); + for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++) + crash_data->registers[i] = reg_dump_values[i]; } -static void ath10k_pci_hif_post_init(struct ath10k *ar, - struct ath10k_hif_cb *callbacks) +static int ath10k_pci_dump_memory_section(struct ath10k *ar, + const struct ath10k_mem_region *mem_region, + u8 *buf, size_t buf_len) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + const struct ath10k_mem_section *cur_section, *next_section; + unsigned int count, section_size, skip_size; + int ret, i, j; - ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); + if (!mem_region || !buf) + return 0; - memcpy(&ar_pci->msg_callbacks_current, callbacks, - sizeof(ar_pci->msg_callbacks_current)); -} + cur_section = &mem_region->section_table.sections[0]; -static int ath10k_pci_start_ce(struct ath10k *ar) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_state *ce_diag = ar_pci->ce_diag; - const struct ce_attr *attr; - struct hif_ce_pipe_info *pipe_info; - struct ath10k_pci_compl *compl; - int i, pipe_num, completions, disable_interrupts; + if (mem_region->start > cur_section->start) { + ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n", + mem_region->start, cur_section->start); + return 0; + } - spin_lock_init(&ar_pci->compl_lock); - INIT_LIST_HEAD(&ar_pci->compl_process); + skip_size = cur_section->start - mem_region->start; - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; + /* fill the gap between the first register section and register + * start address + */ + for (i = 0; i < skip_size; i++) { + *buf = ATH10K_MAGIC_NOT_COPIED; + buf++; + } - spin_lock_init(&pipe_info->pipe_lock); - INIT_LIST_HEAD(&pipe_info->compl_free); + count = 0; - /* Handle Diagnostic CE specially */ - if (pipe_info->ce_hdl == ce_diag) - continue; + for (i = 0; cur_section != NULL; i++) { + section_size = cur_section->end - cur_section->start; - attr = &host_ce_config_wlan[pipe_num]; - completions = 0; + if (section_size <= 0) { + ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n", + cur_section->start, + cur_section->end); + break; + } + + if ((i + 1) == mem_region->section_table.size) { + /* last section */ + next_section = NULL; + skip_size = 0; + } else { + next_section = cur_section + 1; + + if (cur_section->end > next_section->start) { + ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n", + next_section->start, + cur_section->end); + break; + } - if (attr->src_nentries) { - disable_interrupts = attr->flags & CE_ATTR_DIS_INTR; - ath10k_ce_send_cb_register(pipe_info->ce_hdl, - ath10k_pci_ce_send_done, - disable_interrupts); - completions += attr->src_nentries; - pipe_info->num_sends_allowed = attr->src_nentries - 1; + skip_size = next_section->start - cur_section->end; } - if (attr->dest_nentries) { - ath10k_ce_recv_cb_register(pipe_info->ce_hdl, - ath10k_pci_ce_recv_data); - completions += attr->dest_nentries; + if (buf_len < (skip_size + section_size)) { + ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len); + break; } - if (completions == 0) - continue; + buf_len -= skip_size + section_size; - for (i = 0; i < completions; i++) { - compl = kmalloc(sizeof(struct ath10k_pci_compl), - GFP_KERNEL); - if (!compl) { - ath10k_warn("No memory for completion state\n"); - ath10k_pci_stop_ce(ar); - return -ENOMEM; - } + /* 
read section to dest memory */ + ret = ath10k_pci_diag_read_mem(ar, cur_section->start, + buf, section_size); + if (ret) { + ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n", + cur_section->start, ret); + break; + } - compl->send_or_recv = HIF_CE_COMPLETE_FREE; - list_add_tail(&compl->list, &pipe_info->compl_free); + buf += section_size; + count += section_size; + + /* fill in the gap between this section and the next */ + for (j = 0; j < skip_size; j++) { + *buf = ATH10K_MAGIC_NOT_COPIED; + buf++; } + + count += skip_size; + + if (!next_section) + /* this was the last section */ + break; + + cur_section = next_section; } - return 0; + return count; } -static void ath10k_pci_stop_ce(struct ath10k *ar) +static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_pci_compl *compl; - struct sk_buff *skb; - int i; + u32 val; - ath10k_ce_disable_interrupts(ar); + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + FW_RAM_CONFIG_ADDRESS, config); + + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + FW_RAM_CONFIG_ADDRESS); + if (val != config) { + ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n", + val, config); + return -EIO; + } - /* Cancel the pending tasklet */ - tasklet_kill(&ar_pci->intr_tq); + return 0; +} - for (i = 0; i < CE_COUNT; i++) - tasklet_kill(&ar_pci->pipe_info[i].intr); +/* Always returns the length */ +static int ath10k_pci_dump_memory_sram(struct ath10k *ar, + const struct ath10k_mem_region *region, + u8 *buf) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + u32 base_addr, i; + + base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG); + base_addr += region->start; - /* Mark pending completions as aborted, so that upper layers free up - * their associated resources */ - spin_lock_bh(&ar_pci->compl_lock); - list_for_each_entry(compl, &ar_pci->compl_process, list) { - skb = (struct sk_buff *)compl->transfer_context; - ATH10K_SKB_CB(skb)->is_aborted = true; + for (i = 0; i < region->len; i += 4) { + iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG); + *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG); } - spin_unlock_bh(&ar_pci->compl_lock); + + return region->len; } -static void ath10k_pci_cleanup_ce(struct ath10k *ar) +/* if an error happened returns < 0, otherwise the length */ +static int ath10k_pci_dump_memory_reg(struct ath10k *ar, + const struct ath10k_mem_region *region, + u8 *buf) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_pci_compl *compl, *tmp; - struct hif_ce_pipe_info *pipe_info; - struct sk_buff *netbuf; - int pipe_num; - - /* Free pending completions. */ - spin_lock_bh(&ar_pci->compl_lock); - if (!list_empty(&ar_pci->compl_process)) - ath10k_warn("pending completions still present! possible memory leaks.\n"); + u32 i; + int ret; - list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) { - list_del(&compl->list); - netbuf = (struct sk_buff *)compl->transfer_context; - dev_kfree_skb_any(netbuf); - kfree(compl); + mutex_lock(&ar->conf_mutex); + if (ar->state != ATH10K_STATE_ON) { + ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n"); + ret = -EIO; + goto done; } - spin_unlock_bh(&ar_pci->compl_lock); - /* Free unused completions for each pipe. 
*/ - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; + for (i = 0; i < region->len; i += 4) + *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i); - spin_lock_bh(&pipe_info->pipe_lock); - list_for_each_entry_safe(compl, tmp, - &pipe_info->compl_free, list) { - list_del(&compl->list); - kfree(compl); - } - spin_unlock_bh(&pipe_info->pipe_lock); + ret = region->len; +done: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +/* if an error happened returns < 0, otherwise the length */ +static int ath10k_pci_dump_memory_generic(struct ath10k *ar, + const struct ath10k_mem_region *current_region, + u8 *buf) +{ + int ret; + + if (current_region->section_table.size > 0) + /* Copy each section individually. */ + return ath10k_pci_dump_memory_section(ar, + current_region, + buf, + current_region->len); + + /* No individual memory sections defined so we can + * copy the entire memory region. + */ + ret = ath10k_pci_diag_read_mem(ar, + current_region->start, + buf, + current_region->len); + if (ret) { + ath10k_warn(ar, "failed to copy ramdump region %s: %d\n", + current_region->name, ret); + return ret; } + + return current_region->len; } -static void ath10k_pci_process_ce(struct ath10k *ar) +static void ath10k_pci_dump_memory(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data) { - struct ath10k_pci *ar_pci = ar->hif.priv; - struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; - struct ath10k_pci_compl *compl; - struct sk_buff *skb; - unsigned int nbytes; - int ret, send_done = 0; + const struct ath10k_hw_mem_layout *mem_layout; + const struct ath10k_mem_region *current_region; + struct ath10k_dump_ram_data_hdr *hdr; + u32 count, shift; + size_t buf_len; + int ret, i; + u8 *buf; - /* Upper layers aren't ready to handle tx/rx completions in parallel so - * we must serialize all completion processing. */ + lockdep_assert_held(&ar->dump_mutex); - spin_lock_bh(&ar_pci->compl_lock); - if (ar_pci->compl_processing) { - spin_unlock_bh(&ar_pci->compl_lock); + if (!crash_data) return; - } - ar_pci->compl_processing = true; - spin_unlock_bh(&ar_pci->compl_lock); - for (;;) { - spin_lock_bh(&ar_pci->compl_lock); - if (list_empty(&ar_pci->compl_process)) { - spin_unlock_bh(&ar_pci->compl_lock); + mem_layout = ath10k_coredump_get_mem_layout(ar); + if (!mem_layout) + return; + + current_region = &mem_layout->region_table.regions[0]; + + buf = crash_data->ramdump_buf; + buf_len = crash_data->ramdump_buf_len; + + memset(buf, 0, buf_len); + + for (i = 0; i < mem_layout->region_table.size; i++) { + count = 0; + + if (current_region->len > buf_len) { + ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n", + current_region->name, + current_region->len, + buf_len); break; } - compl = list_first_entry(&ar_pci->compl_process, - struct ath10k_pci_compl, list); - list_del(&compl->list); - spin_unlock_bh(&ar_pci->compl_lock); - - if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) { - cb->tx_completion(ar, - compl->transfer_context, - compl->transfer_id); - send_done = 1; - } else { - ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1); + + /* To get IRAM dump, the host driver needs to switch target + * ram config from DRAM to IRAM. 
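+ * The config value is taken from bits [31:20] of the region start
+ * address, e.g. a region starting at 0x00980000 yields 9
+ * (0x00980000 >> 20).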
+ */ + if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 || + current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) { + shift = current_region->start >> 20; + + ret = ath10k_pci_set_ram_config(ar, shift); if (ret) { - ath10k_warn("Unable to post recv buffer for pipe: %d\n", - compl->pipe_info->pipe_num); + ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n", + current_region->name, ret); break; } + } - skb = (struct sk_buff *)compl->transfer_context; - nbytes = compl->nbytes; - - ath10k_dbg(ATH10K_DBG_PCI, - "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n", - skb, nbytes); - ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, - "ath10k rx: ", skb->data, nbytes); - - if (skb->len + skb_tailroom(skb) >= nbytes) { - skb_trim(skb, 0); - skb_put(skb, nbytes); - cb->rx_completion(ar, skb, - compl->pipe_info->pipe_num); - } else { - ath10k_warn("rxed more than expected (nbytes %d, max %d)", - nbytes, - skb->len + skb_tailroom(skb)); - } + /* Reserve space for the header. */ + hdr = (void *)buf; + buf += sizeof(*hdr); + buf_len -= sizeof(*hdr); + + switch (current_region->type) { + case ATH10K_MEM_REGION_TYPE_IOSRAM: + count = ath10k_pci_dump_memory_sram(ar, current_region, buf); + break; + case ATH10K_MEM_REGION_TYPE_IOREG: + ret = ath10k_pci_dump_memory_reg(ar, current_region, buf); + if (ret < 0) + break; + + count = ret; + break; + default: + ret = ath10k_pci_dump_memory_generic(ar, current_region, buf); + if (ret < 0) + break; + + count = ret; + break; } - compl->send_or_recv = HIF_CE_COMPLETE_FREE; + hdr->region_type = cpu_to_le32(current_region->type); + hdr->start = cpu_to_le32(current_region->start); + hdr->length = cpu_to_le32(count); - /* - * Add completion back to the pipe's free list. - */ - spin_lock_bh(&compl->pipe_info->pipe_lock); - list_add_tail(&compl->list, &compl->pipe_info->compl_free); - compl->pipe_info->num_sends_allowed += send_done; - spin_unlock_bh(&compl->pipe_info->pipe_lock); - } + if (count == 0) + /* Note: the header remains, just with zero length. */ + break; + + buf += count; + buf_len -= count; - spin_lock_bh(&ar_pci->compl_lock); - ar_pci->compl_processing = false; - spin_unlock_bh(&ar_pci->compl_lock); + current_region++; + } } -/* TODO - temporary mapping while we have too few CE's */ -static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, - u16 service_id, u8 *ul_pipe, - u8 *dl_pipe, int *ul_is_polled, - int *dl_is_polled) +static void ath10k_pci_fw_dump_work(struct work_struct *work) { - int ret = 0; - - /* polling for received messages not supported */ - *dl_is_polled = 0; + struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci, + dump_work); + struct ath10k_fw_crash_data *crash_data; + struct ath10k *ar = ar_pci->ar; + char guid[UUID_STRING_LEN + 1]; - switch (service_id) { - case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: - /* - * Host->target HTT gets its own pipe, so it can be polled - * while other pipes are interrupt driven. - */ - *ul_pipe = 4; - /* - * Use the same target->host pipe for HTC ctrl, HTC raw - * streams, and HTT. - */ - *dl_pipe = 1; - break; + mutex_lock(&ar->dump_mutex); - case ATH10K_HTC_SVC_ID_RSVD_CTRL: - case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS: - /* - * Note: HTC_RAW_STREAMS_SVC is currently unused, and - * HTC_CTRL_RSVD_SVC could share the same pipe as the - * WMI services. So, if another CE is needed, change - * this to *ul_pipe = 3, which frees up CE 0. 
- */ - /* *ul_pipe = 3; */ - *ul_pipe = 0; - *dl_pipe = 1; - break; + spin_lock_bh(&ar->data_lock); + ar->stats.fw_crash_counter++; + spin_unlock_bh(&ar->data_lock); - case ATH10K_HTC_SVC_ID_WMI_DATA_BK: - case ATH10K_HTC_SVC_ID_WMI_DATA_BE: - case ATH10K_HTC_SVC_ID_WMI_DATA_VI: - case ATH10K_HTC_SVC_ID_WMI_DATA_VO: + crash_data = ath10k_coredump_new(ar); - case ATH10K_HTC_SVC_ID_WMI_CONTROL: - *ul_pipe = 3; - *dl_pipe = 2; - break; + if (crash_data) + scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); + else + scnprintf(guid, sizeof(guid), "n/a"); - /* pipe 5 unused */ - /* pipe 6 reserved */ - /* pipe 7 reserved */ + ath10k_err(ar, "firmware crashed! (guid %s)\n", guid); + ath10k_print_driver_info(ar); + ath10k_pci_dump_registers(ar, crash_data); + ath10k_ce_dump_registers(ar, crash_data); + ath10k_pci_dump_memory(ar, crash_data); - default: - ret = -1; - break; - } - *ul_is_polled = - (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0; + mutex_unlock(&ar->dump_mutex); - return ret; + ath10k_core_start_recovery(ar); } -static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, - u8 *ul_pipe, u8 *dl_pipe) +static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) { - int ul_is_polled, dl_is_polled; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - (void)ath10k_pci_hif_map_service_to_pipe(ar, - ATH10K_HTC_SVC_ID_RSVD_CTRL, - ul_pipe, - dl_pipe, - &ul_is_polled, - &dl_is_polled); + queue_work(ar->workqueue, &ar_pci->dump_work); } -static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, - int num) +void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, + int force) { - struct ath10k *ar = pipe_info->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_state *ce_state = pipe_info->ce_hdl; - struct sk_buff *skb; - dma_addr_t ce_data; - int i, ret = 0; - if (pipe_info->buf_sz == 0) - return 0; + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); - for (i = 0; i < num; i++) { - skb = dev_alloc_skb(pipe_info->buf_sz); - if (!skb) { - ath10k_warn("could not allocate skbuff for pipe %d\n", - num); - ret = -ENOMEM; - goto err; - } + if (!force) { + int resources; + /* + * Decide whether to actually poll for completions, or just + * wait for a later chance. + * If there seem to be plenty of resources left, then just wait + * since checking involves reading a CE register, which is a + * relatively expensive operation. + */ + resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); + + /* + * If at least 50% of the total resources are still available, + * don't bother checking again yet. 
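+ * The 50% threshold is computed as src_nentries >> 1 below, e.g. with
+ * 64 source entries the poll is skipped while more than 32 remain
+ * free.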
+ */ + if (resources > (ar_pci->attr[pipe].src_nentries >> 1)) + return; + } + ath10k_ce_per_engine_service(ar, pipe); +} - WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); +static void ath10k_pci_rx_retry_sync(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ce_data = dma_map_single(ar->dev, skb->data, - skb->len + skb_tailroom(skb), - DMA_FROM_DEVICE); + timer_delete_sync(&ar_pci->rx_post_retry); +} - if (unlikely(dma_mapping_error(ar->dev, ce_data))) { - ath10k_warn("could not dma map skbuff\n"); - dev_kfree_skb_any(skb); - ret = -EIO; - goto err; - } +int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + const struct ce_service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; - ATH10K_SKB_CB(skb)->paddr = ce_data; + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); - pci_dma_sync_single_for_device(ar_pci->pdev, ce_data, - pipe_info->buf_sz, - PCI_DMA_FROMDEVICE); + for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) { + entry = &ar_pci->serv_to_pipe[i]; - ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb, - ce_data); - if (ret) { - ath10k_warn("could not enqueue to pipe %d (%d)\n", - num, ret); - goto err; + if (__le32_to_cpu(entry->service_id) != service_id) + continue; + + switch (__le32_to_cpu(entry->pipedir)) { + case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; } } - return ret; + if (!ul_set || !dl_set) + return -ENOENT; -err: - ath10k_pci_rx_pipe_cleanup(pipe_info); - return ret; + return 0; } -static int ath10k_pci_post_rx(struct ath10k *ar) +void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info; - const struct ce_attr *attr; - int pipe_num, ret = 0; + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - attr = &host_ce_config_wlan[pipe_num]; + (void)ath10k_pci_hif_map_service_to_pipe(ar, + ATH10K_HTC_SVC_ID_RSVD_CTRL, + ul_pipe, dl_pipe); +} - if (attr->dest_nentries == 0) - continue; +void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) +{ + u32 val; - ret = ath10k_pci_post_rx_pipe(pipe_info, - attr->dest_nentries - 1); - if (ret) { - ath10k_warn("Unable to replenish recv buffers for pipe: %d\n", - pipe_num); + switch (ar->hw_rev) { + case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: + case ATH10K_HW_QCA6174: + case ATH10K_HW_QCA9377: + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS); + val &= ~CORE_CTRL_PCIE_REG_31_MASK; + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS, val); + break; + case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: + case ATH10K_HW_QCA9888: + case ATH10K_HW_QCA4019: + /* TODO: Find appropriate register configuration for QCA99X0 + * to mask irq/MSI. 
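+ * Until such a configuration is known these targets are left with
+ * the firmware MSI unmasked.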
+ */ + break; + case ATH10K_HW_WCN3990: + break; + } +} - for (; pipe_num >= 0; pipe_num--) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - ath10k_pci_rx_pipe_cleanup(pipe_info); - } - return ret; - } +static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) +{ + u32 val; + + switch (ar->hw_rev) { + case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: + case ATH10K_HW_QCA6174: + case ATH10K_HW_QCA9377: + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS); + val |= CORE_CTRL_PCIE_REG_31_MASK; + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS, val); + break; + case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: + case ATH10K_HW_QCA9888: + case ATH10K_HW_QCA4019: + /* TODO: Find appropriate register configuration for QCA99X0 + * to unmask irq/MSI. + */ + break; + case ATH10K_HW_WCN3990: + break; } +} - return 0; +static void ath10k_pci_irq_disable(struct ath10k *ar) +{ + ath10k_ce_disable_interrupts(ar); + ath10k_pci_disable_and_clear_intx_irq(ar); + ath10k_pci_irq_msi_fw_mask(ar); +} + +static void ath10k_pci_irq_sync(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + synchronize_irq(ar_pci->pdev->irq); +} + +static void ath10k_pci_irq_enable(struct ath10k *ar) +{ + ath10k_ce_enable_interrupts(ar); + ath10k_pci_enable_intx_irq(ar); + ath10k_pci_irq_msi_fw_unmask(ar); } static int ath10k_pci_hif_start(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret; - ret = ath10k_pci_start_ce(ar); - if (ret) { - ath10k_warn("could not start CE (%d)\n", ret); - return ret; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); - /* Post buffers once to start things off. */ - ret = ath10k_pci_post_rx(ar); - if (ret) { - ath10k_warn("could not post rx pipes (%d)\n", ret); - return ret; - } + ath10k_core_napi_enable(ar); + + ath10k_pci_irq_enable(ar); + ath10k_pci_rx_post(ar); + + pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, + ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC); - ar_pci->started = 1; return 0; } -static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) +static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) { struct ath10k *ar; - struct ath10k_pci *ar_pci; - struct ce_state *ce_hdl; - u32 buf_sz; - struct sk_buff *netbuf; - u32 ce_data; + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct sk_buff *skb; + int i; - buf_sz = pipe_info->buf_sz; + ar = pci_pipe->hif_ce_state; + ce_pipe = pci_pipe->ce_hdl; + ce_ring = ce_pipe->dest_ring; - /* Unused Copy Engine */ - if (buf_sz == 0) + if (!ce_ring) return; - ar = pipe_info->hif_ce_state; - ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci->started) + if (!pci_pipe->buf_sz) return; - ce_hdl = pipe_info->ce_hdl; + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; - while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf, - &ce_data) == 0) { - dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr, - netbuf->len + skb_tailroom(netbuf), + dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); - dev_kfree_skb_any(netbuf); + dev_kfree_skb_any(skb); } } -static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) +static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) { struct ath10k *ar; - struct ath10k_pci *ar_pci; - struct ce_state *ce_hdl; - struct sk_buff *netbuf; - u32 ce_data; - 
unsigned int nbytes; - unsigned int id; - u32 buf_sz; + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct sk_buff *skb; + int i; - buf_sz = pipe_info->buf_sz; + ar = pci_pipe->hif_ce_state; + ce_pipe = pci_pipe->ce_hdl; + ce_ring = ce_pipe->src_ring; - /* Unused Copy Engine */ - if (buf_sz == 0) + if (!ce_ring) return; - ar = pipe_info->hif_ce_state; - ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci->started) + if (!pci_pipe->buf_sz) return; - ce_hdl = pipe_info->ce_hdl; + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; - while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, - &ce_data, &nbytes, &id) == 0) { - if (netbuf != CE_SENDLIST_ITEM_CTXT) - /* - * Indicate the completion to higer layer to free - * the buffer - */ - ATH10K_SKB_CB(netbuf)->is_aborted = true; - ar_pci->msg_callbacks_current.tx_completion(ar, - netbuf, - id); + ath10k_htc_tx_completion_handler(ar, skb); } } @@ -1225,8 +2047,8 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int pipe_num; - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { - struct hif_ce_pipe_info *pipe_info; + for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { + struct ath10k_pci_pipe *pipe_info; pipe_info = &ar_pci->pipe_info[pipe_num]; ath10k_pci_rx_pipe_cleanup(pipe_info); @@ -1234,51 +2056,71 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar) } } -static void ath10k_pci_ce_deinit(struct ath10k *ar) +void ath10k_pci_ce_deinit(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info; - int pipe_num; + int i; - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - if (pipe_info->ce_hdl) { - ath10k_ce_deinit(pipe_info->ce_hdl); - pipe_info->ce_hdl = NULL; - pipe_info->buf_sz = 0; - } - } + for (i = 0; i < CE_COUNT; i++) + ath10k_ce_deinit_pipe(ar, i); +} + +void ath10k_pci_flush(struct ath10k *ar) +{ + ath10k_pci_rx_retry_sync(ar); + ath10k_pci_buffer_cleanup(ar); } static void ath10k_pci_hif_stop(struct ath10k *ar) { - ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; - ath10k_pci_stop_ce(ar); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); - /* At this point, asynchronous threads are stopped, the target should - * not DMA nor interrupt. We process the leftovers and then free - * everything else up. */ + ath10k_pci_irq_disable(ar); + ath10k_pci_irq_sync(ar); - ath10k_pci_process_ce(ar); - ath10k_pci_cleanup_ce(ar); - ath10k_pci_buffer_cleanup(ar); - ath10k_pci_ce_deinit(ar); + ath10k_core_napi_sync_disable(ar); + + cancel_work_sync(&ar_pci->dump_work); + + /* Most likely the device has HTT Rx ring configured. The only way to + * prevent the device from accessing (and possible corrupting) host + * memory is to reset the chip now. + * + * There's also no known way of masking MSI interrupts on the device. + * For ranged MSI the CE-related interrupts can be masked. However + * regardless how many MSI interrupts are assigned the first one + * is always used for firmware indications (crashes) and cannot be + * masked. To prevent the device from asserting the interrupt reset it + * before proceeding with cleanup. 
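+ * ath10k_pci_safe_chip_reset() below does exactly that.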
+ */ + ath10k_pci_safe_chip_reset(ar); + + ath10k_pci_flush(ar); + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + WARN_ON(ar_pci->ps_wake_refcount > 0); + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } -static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, - void *req, u32 req_len, - void *resp, u32 *resp_len) +int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, + void *req, u32 req_len, + void *resp, u32 *resp_len) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl; - struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl; + struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; + struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; + struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; + struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; dma_addr_t req_paddr = 0; dma_addr_t resp_paddr = 0; struct bmi_xfer xfer = {}; void *treq, *tresp = NULL; int ret = 0; + might_sleep(); + if (resp && !resp_len) return -EINVAL; @@ -1291,8 +2133,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); ret = dma_mapping_error(ar->dev, req_paddr); - if (ret) + if (ret) { + ret = -EIO; goto err_dma; + } if (resp && resp_len) { tresp = kzalloc(*resp_len, GFP_KERNEL); @@ -1304,29 +2148,27 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, DMA_FROM_DEVICE); ret = dma_mapping_error(ar->dev, resp_paddr); - if (ret) + if (ret) { + ret = -EIO; goto err_req; + } xfer.wait_for_resp = true; xfer.resp_len = 0; - ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr); + ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr); } - init_completion(&xfer.done); - ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); if (ret) goto err_resp; - ret = wait_for_completion_timeout(&xfer.done, - BMI_COMMUNICATION_TIMEOUT_HZ); - if (ret <= 0) { - u32 unused_buffer; + ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer); + if (ret) { + dma_addr_t unused_buffer; unsigned int unused_nbytes; unsigned int unused_id; - ret = -ETIMEDOUT; ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, &unused_nbytes, &unused_id); } else { @@ -1336,7 +2178,7 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, err_resp: if (resp) { - u32 unused_buffer; + dma_addr_t unused_buffer; ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); dma_unmap_single(ar->dev, resp_paddr, @@ -1347,7 +2189,7 @@ err_req: if (ret == 0 && resp_len) { *resp_len = min(*resp_len, xfer.resp_len); - memcpy(resp, tresp, xfer.resp_len); + memcpy(resp, tresp, *resp_len); } err_dma: kfree(treq); @@ -1356,133 +2198,70 @@ err_dma: return ret; } -static void ath10k_pci_bmi_send_done(struct ce_state *ce_state, - void *transfer_context, - u32 data, - unsigned int nbytes, - unsigned int transfer_id) +static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) { - struct bmi_xfer *xfer = transfer_context; + struct bmi_xfer *xfer; - if (xfer->wait_for_resp) + if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer)) return; - complete(&xfer->done); + xfer->tx_done = true; } -static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state, - void *transfer_context, - u32 data, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags) +static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) { - struct bmi_xfer *xfer = transfer_context; + struct ath10k *ar = 
ce_state->ar; + struct bmi_xfer *xfer; + unsigned int nbytes; + + if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, + &nbytes)) + return; + + if (WARN_ON_ONCE(!xfer)) + return; if (!xfer->wait_for_resp) { - ath10k_warn("unexpected: BMI data received; ignoring\n"); + ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); return; } xfer->resp_len = nbytes; - complete(&xfer->done); + xfer->rx_done = true; } -/* - * Map from service/endpoint to Copy Engine. - * This table is derived from the CE_PCI TABLE, above. - * It is passed to the Target at startup for use by firmware. - */ -static const struct service_to_pipe target_service_to_ce_map_wlan[] = { - { - ATH10K_HTC_SVC_ID_WMI_DATA_VO, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_VO, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BK, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BK, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BE, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BE, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_VI, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_VI, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_CONTROL, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_CONTROL, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_RSVD_CTRL, - PIPEDIR_OUT, /* out = UL = host -> target */ - 0, /* could be moved to 3 (share with WMI) */ - }, - { - ATH10K_HTC_SVC_ID_RSVD_CTRL, - PIPEDIR_IN, /* in = DL = target -> host */ - 1, - }, - { - ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ - PIPEDIR_OUT, /* out = UL = host -> target */ - 0, - }, - { - ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ - PIPEDIR_IN, /* in = DL = target -> host */ - 1, - }, - { - ATH10K_HTC_SVC_ID_HTT_DATA_MSG, - PIPEDIR_OUT, /* out = UL = host -> target */ - 4, - }, - { - ATH10K_HTC_SVC_ID_HTT_DATA_MSG, - PIPEDIR_IN, /* in = DL = target -> host */ - 1, - }, +static int ath10k_pci_bmi_wait(struct ath10k *ar, + struct ath10k_ce_pipe *tx_pipe, + struct ath10k_ce_pipe *rx_pipe, + struct bmi_xfer *xfer) +{ + unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; + unsigned long started = jiffies; + unsigned long dur; + int ret; - /* (Additions here) */ + while (time_before_eq(jiffies, timeout)) { + ath10k_pci_bmi_send_done(tx_pipe); + ath10k_pci_bmi_recv_data(rx_pipe); - { /* Must be last */ - 0, - 0, - 0, - }, -}; + if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) { + ret = 0; + goto out; + } + + schedule(); + } + + ret = -ETIMEDOUT; + +out: + dur = jiffies - started; + if (dur > HZ) + ath10k_dbg(ar, ATH10K_DBG_BMI, + "bmi cmd took %lu jiffies hz %d ret %d\n", + dur, HZ, ret); + return ret; +} /* * Send an interrupt to the device to wake up the Target CPU @@ -1490,31 +2269,62 @@ static const struct service_to_pipe target_service_to_ce_map_wlan[] = { */ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) { - int ret; - u32 core_ctrl; + u32 addr, val; - ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS | - CORE_CTRL_ADDRESS, - &core_ctrl); - if (ret) { - ath10k_warn("Unable to read core ctrl\n"); - return ret; + addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS; + val = ath10k_pci_read32(ar, 
addr); + val |= CORE_CTRL_CPU_INTR_MASK; + ath10k_pci_write32(ar, addr, val); + + return 0; +} + +static int ath10k_pci_get_num_banks(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + switch (ar_pci->pdev->device) { + case QCA988X_2_0_DEVICE_ID_UBNT: + case QCA988X_2_0_DEVICE_ID: + case QCA99X0_2_0_DEVICE_ID: + case QCA9888_2_0_DEVICE_ID: + case QCA9984_1_0_DEVICE_ID: + case QCA9887_1_0_DEVICE_ID: + return 1; + case QCA6164_2_1_DEVICE_ID: + case QCA6174_2_1_DEVICE_ID: + switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) { + case QCA6174_HW_1_0_CHIP_ID_REV: + case QCA6174_HW_1_1_CHIP_ID_REV: + case QCA6174_HW_2_1_CHIP_ID_REV: + case QCA6174_HW_2_2_CHIP_ID_REV: + return 3; + case QCA6174_HW_1_3_CHIP_ID_REV: + return 2; + case QCA6174_HW_3_0_CHIP_ID_REV: + case QCA6174_HW_3_1_CHIP_ID_REV: + case QCA6174_HW_3_2_CHIP_ID_REV: + return 9; + } + break; + case QCA9377_1_0_DEVICE_ID: + return 9; } - /* A_INUM_FIRMWARE interrupt to Target CPU */ - core_ctrl |= CORE_CTRL_CPU_INTR_MASK; + ath10k_warn(ar, "unknown number of banks, assuming 1\n"); + return 1; +} - ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS | - CORE_CTRL_ADDRESS, - core_ctrl); - if (ret) - ath10k_warn("Unable to set interrupt mask\n"); +static int ath10k_bus_get_num_banks(struct ath10k *ar) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); - return ret; + return ce->bus_ops->get_num_banks(ar); } -static int ath10k_pci_init_config(struct ath10k *ar) +int ath10k_pci_init_config(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); u32 interconnect_targ_addr; u32 pcie_state_targ_addr = 0; u32 pipe_cfg_targ_addr = 0; @@ -1531,273 +2341,758 @@ static int ath10k_pci_init_config(struct ath10k *ar) host_interest_item_address(HI_ITEM(hi_interconnect_state)); /* Supply Target-side CE configuration */ - ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr, - &pcie_state_targ_addr); + ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, + &pcie_state_targ_addr); if (ret != 0) { - ath10k_err("Failed to get pcie state addr: %d\n", ret); + ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); return ret; } if (pcie_state_targ_addr == 0) { ret = -EIO; - ath10k_err("Invalid pcie state addr\n"); + ath10k_err(ar, "Invalid pcie state addr\n"); return ret; } - ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + + ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + offsetof(struct pcie_state, - pipe_cfg_addr), - &pipe_cfg_targ_addr); + pipe_cfg_addr)), + &pipe_cfg_targ_addr); if (ret != 0) { - ath10k_err("Failed to get pipe cfg addr: %d\n", ret); + ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); return ret; } if (pipe_cfg_targ_addr == 0) { ret = -EIO; - ath10k_err("Invalid pipe cfg addr\n"); + ath10k_err(ar, "Invalid pipe cfg addr\n"); return ret; } ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, - target_ce_config_wlan, - sizeof(target_ce_config_wlan)); + ar_pci->pipe_config, + sizeof(struct ce_pipe_config) * + NUM_TARGET_CE_CONFIG_WLAN); if (ret != 0) { - ath10k_err("Failed to write pipe cfg: %d\n", ret); + ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); return ret; } - ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + + ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + offsetof(struct pcie_state, - svc_to_pipe_map), - &svc_to_pipe_map); + svc_to_pipe_map)), + &svc_to_pipe_map); if (ret != 0) { - ath10k_err("Failed to get svc/pipe map: %d\n", ret); + ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); return ret; } if 
(svc_to_pipe_map == 0) { ret = -EIO; - ath10k_err("Invalid svc_to_pipe map\n"); + ath10k_err(ar, "Invalid svc_to_pipe map\n"); return ret; } ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, - target_service_to_ce_map_wlan, - sizeof(target_service_to_ce_map_wlan)); + ar_pci->serv_to_pipe, + sizeof(pci_target_service_to_ce_map_wlan)); if (ret != 0) { - ath10k_err("Failed to write svc/pipe map: %d\n", ret); + ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret); return ret; } - ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + + ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + offsetof(struct pcie_state, - config_flags), - &pcie_config_flags); + config_flags)), + &pcie_config_flags); if (ret != 0) { - ath10k_err("Failed to get pcie config_flags: %d\n", ret); + ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret); return ret; } pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; - ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr + - offsetof(struct pcie_state, config_flags), - &pcie_config_flags, - sizeof(pcie_config_flags)); + ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr + + offsetof(struct pcie_state, + config_flags)), + pcie_config_flags); if (ret != 0) { - ath10k_err("Failed to write pcie config_flags: %d\n", ret); + ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret); return ret; } /* configure early allocation */ ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc)); - ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value); + ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value); if (ret != 0) { - ath10k_err("Faile to get early alloc val: %d\n", ret); + ath10k_err(ar, "Failed to get early alloc val: %d\n", ret); return ret; } /* first bank is switched to IRAM */ ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & HI_EARLY_ALLOC_MAGIC_MASK); - ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & + ealloc_value |= ((ath10k_bus_get_num_banks(ar) << + HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & HI_EARLY_ALLOC_IRAM_BANKS_MASK); - ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value); + ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); if (ret != 0) { - ath10k_err("Failed to set early alloc val: %d\n", ret); + ath10k_err(ar, "Failed to set early alloc val: %d\n", ret); return ret; } /* Tell Target to proceed with initialization */ flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); - ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value); + ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value); if (ret != 0) { - ath10k_err("Failed to get option val: %d\n", ret); + ath10k_err(ar, "Failed to get option val: %d\n", ret); return ret; } flag2_value |= HI_OPTION_EARLY_CFG_DONE; - ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value); + ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value); if (ret != 0) { - ath10k_err("Failed to set option val: %d\n", ret); + ath10k_err(ar, "Failed to set option val: %d\n", ret); return ret; } return 0; } +static void ath10k_pci_override_ce_config(struct ath10k *ar) +{ + struct ce_attr *attr; + struct ce_pipe_config *config; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + /* For QCA6174 we're overriding the Copy Engine 5 configuration, + * since it is currently used by another feature.
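/* Editor's note: a minimal standalone sketch (not the driver's code) of how
 * the hi_early_alloc value above is composed: a magic cookie plus the IRAM
 * bank count, each masked into its own bit-field. The shift/mask values
 * below are illustrative placeholders, not the real HI_EARLY_ALLOC_*
 * constants.
 */
#include <stdint.h>
#include <stdio.h>

#define MAGIC_SHIFT 0           /* placeholder for HI_EARLY_ALLOC_MAGIC_SHIFT */
#define MAGIC_MASK  0x000000ffu
#define BANKS_SHIFT 8           /* placeholder for HI_EARLY_ALLOC_IRAM_BANKS_SHIFT */
#define BANKS_MASK  0x00000f00u
#define MAGIC       0x6du       /* placeholder for HI_EARLY_ALLOC_MAGIC */

static uint32_t pack_early_alloc(uint32_t old, unsigned int num_banks)
{
	old |= (MAGIC << MAGIC_SHIFT) & MAGIC_MASK;
	old |= ((uint32_t)num_banks << BANKS_SHIFT) & BANKS_MASK;
	return old;
}

int main(void)
{
	/* e.g. QCA6174 HW 3.x reports 9 IRAM banks in the switch above */
	printf("hi_early_alloc = 0x%08x\n", pack_early_alloc(0, 9));
	return 0;
}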
+ */ + + /* Override Host's Copy Engine 5 configuration */ + attr = &ar_pci->attr[5]; + attr->src_sz_max = 0; + attr->dest_nentries = 0; + + /* Override Target firmware's Copy Engine configuration */ + config = &ar_pci->pipe_config[5]; + config->pipedir = __cpu_to_le32(PIPEDIR_OUT); + config->nbytes_max = __cpu_to_le32(2048); + /* Map from service/endpoint to Copy Engine */ + ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1); +} -static int ath10k_pci_ce_init(struct ath10k *ar) +int ath10k_pci_alloc_pipes(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info; - const struct ce_attr *attr; - int pipe_num; + struct ath10k_pci_pipe *pipe; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + int i, ret; - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - pipe_info->pipe_num = pipe_num; - pipe_info->hif_ce_state = ar; - attr = &host_ce_config_wlan[pipe_num]; - - pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); - if (pipe_info->ce_hdl == NULL) { - ath10k_err("Unable to initialize CE for pipe: %d\n", - pipe_num); - - /* It is safe to call it here. It checks if ce_hdl is - * valid for each pipe */ - ath10k_pci_ce_deinit(ar); - return -1; + for (i = 0; i < CE_COUNT; i++) { + pipe = &ar_pci->pipe_info[i]; + pipe->ce_hdl = &ce->ce_states[i]; + pipe->pipe_num = i; + pipe->hif_ce_state = ar; + + ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]); + if (ret) { + ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", + i, ret); + return ret; } - if (pipe_num == ar_pci->ce_count - 1) { - /* - * Reserve the ultimate CE for - * diagnostic Window support - */ - ar_pci->ce_diag = - ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl; + /* Last CE is Diagnostic Window */ + if (i == CE_DIAG_PIPE) { + ar_pci->ce_diag = pipe->ce_hdl; continue; } - pipe_info->buf_sz = (size_t) (attr->src_sz_max); + pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max); } - /* - * Initially, establish CE completion handlers for use with BMI. - * These are overwritten with generic handlers after we exit BMI phase. 
+ return 0; +} + +void ath10k_pci_free_pipes(struct ath10k *ar) +{ + int i; + + for (i = 0; i < CE_COUNT; i++) + ath10k_ce_free_pipe(ar, i); +} + +int ath10k_pci_init_pipes(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int i, ret; + + for (i = 0; i < CE_COUNT; i++) { + ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]); + if (ret) { + ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", + i, ret); + return ret; + } + } + + return 0; +} + +static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) +{ + return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & + FW_IND_EVENT_PENDING; +} + +static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) +{ + u32 val; + + val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); + val &= ~FW_IND_EVENT_PENDING; + ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); +} + +static bool ath10k_pci_has_device_gone(struct ath10k *ar) +{ + u32 val; + + val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); + return (val == 0xffffffff); +} + +/* this function effectively clears target memory controller assert line */ +static void ath10k_pci_warm_reset_si0(struct ath10k *ar) +{ + u32 val; + + val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, + val | SOC_RESET_CONTROL_SI0_RST_MASK); + val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + + msleep(10); + + val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, + val & ~SOC_RESET_CONTROL_SI0_RST_MASK); + val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + + msleep(10); +} + +static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) +{ + u32 val; + + ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); + + val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, + val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); +} + +static void ath10k_pci_warm_reset_ce(struct ath10k *ar) +{ + u32 val; + + val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + + ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, + val | SOC_RESET_CONTROL_CE_RST_MASK); + msleep(10); + ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, + val & ~SOC_RESET_CONTROL_CE_RST_MASK); +} + +static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar) +{ + u32 val; + + val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS); + ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS, + val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); +} + +static int ath10k_pci_warm_reset(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); + + spin_lock_bh(&ar->data_lock); + ar->stats.fw_warm_reset_counter++; + spin_unlock_bh(&ar->data_lock); + + ath10k_pci_irq_disable(ar); + + /* Make sure the target CPU is not doing anything dangerous, e.g. if it + * were to access copy engine while host performs copy engine reset + * then it is possible for the device to confuse pci-e controller to + * the point of bringing host system to a complete stop (i.e. hang). 
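/* Editor's note: a standalone sketch (with made-up bit values) of how a
 * FW_INDICATOR readout can be classified, mirroring the helpers above:
 * 0xffffffff means the device dropped off the bus, an EVENT_PENDING bit
 * means the firmware crashed, an INITIALIZED bit means boot completed.
 */
#include <stdint.h>

#define IND_EVENT_PENDING (1u << 0)  /* placeholder for FW_IND_EVENT_PENDING */
#define IND_INITIALIZED   (1u << 1)  /* placeholder for FW_IND_INITIALIZED */

enum fw_state { FW_GONE, FW_CRASHED, FW_READY, FW_BOOTING };

static enum fw_state classify_fw_indicator(uint32_t val)
{
	if (val == 0xffffffff)
		return FW_GONE;      /* reads as all-ones once the link is dead */
	if (val & IND_EVENT_PENDING)
		return FW_CRASHED;   /* a crash dump should be collected */
	if (val & IND_INITIALIZED)
		return FW_READY;
	return FW_BOOTING;           /* keep polling until a timeout expires */
}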
*/ - pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; - ath10k_ce_send_cb_register(pipe_info->ce_hdl, - ath10k_pci_bmi_send_done, 0); + ath10k_pci_warm_reset_si0(ar); + ath10k_pci_warm_reset_cpu(ar); + ath10k_pci_init_pipes(ar); + ath10k_pci_wait_for_target_init(ar); + + ath10k_pci_warm_reset_clear_lf(ar); + ath10k_pci_warm_reset_ce(ar); + ath10k_pci_warm_reset_cpu(ar); + ath10k_pci_init_pipes(ar); + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) { + ath10k_warn(ar, "failed to wait for target init: %d\n", ret); + return ret; + } - pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; - ath10k_ce_recv_cb_register(pipe_info->ce_hdl, - ath10k_pci_bmi_recv_data); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); return 0; } -static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) +static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar) +{ + ath10k_pci_irq_disable(ar); + return ath10k_pci_qca99x0_chip_reset(ar); +} + +static int ath10k_pci_safe_chip_reset(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - u32 fw_indicator_address, fw_indicator; - ath10k_pci_wake(ar); + if (!ar_pci->pci_soft_reset) + return -EOPNOTSUPP; + + return ar_pci->pci_soft_reset(ar); +} - fw_indicator_address = ar_pci->fw_indicator_address; - fw_indicator = ath10k_pci_read32(ar, fw_indicator_address); +static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) +{ + int i, ret; + u32 val; - if (fw_indicator & FW_IND_EVENT_PENDING) { - /* ACK: clear Target-side pending event */ - ath10k_pci_write32(ar, fw_indicator_address, - fw_indicator & ~FW_IND_EVENT_PENDING); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n"); - if (ar_pci->started) { - ath10k_pci_hif_dump_area(ar); - } else { - /* - * Probable Target failure before we're prepared - * to handle it. Generally unexpected. - */ - ath10k_warn("early firmware event indicated\n"); + /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset. + * It is thus preferred to use warm reset which is safer but may not be + * able to recover the device from all possible fail scenarios. + * + * Warm reset doesn't always work on first try so attempt it a few + * times before giving up. + */ + for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) { + ret = ath10k_pci_warm_reset(ar); + if (ret) { + ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n", + i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, + ret); + continue; + } + + /* FIXME: Sometimes copy engine doesn't recover after warm + * reset. In most cases this needs cold reset. In some of these + * cases the device is in such a state that a cold reset may + * lock up the host. + * + * Reading any host interest register via copy engine is + * sufficient to verify if device is capable of booting + * firmware blob. 
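/* Editor's note: the retry loop above pairs each warm reset with a cheap
 * "can I still reach the device?" probe (a diag read of a host-interest
 * register) before trusting the result. A generic sketch of that pattern
 * with caller-supplied callbacks; the types and names are hypothetical,
 * not driver API.
 */
#include <stddef.h>

struct resettable {
	int (*reset)(void *dev);
	int (*probe)(void *dev);  /* e.g. read one register through the CE */
	void *dev;
};

static int reset_with_verify(const struct resettable *r, int attempts)
{
	int i, ret = -1;

	for (i = 0; i < attempts; i++) {
		ret = r->reset(r->dev);
		if (ret)
			continue;        /* the reset itself failed, try again */
		ret = r->probe(r->dev);
		if (ret == 0)
			return 0;        /* device demonstrably alive */
	}
	return ret;                      /* caller may escalate to cold reset */
}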
+ */ + ret = ath10k_pci_init_pipes(ar); + if (ret) { + ath10k_warn(ar, "failed to init copy engine: %d\n", + ret); + continue; + } + + ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS, + &val); + if (ret) { + ath10k_warn(ar, "failed to poke copy engine: %d\n", + ret); + continue; } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n"); + return 0; } - ath10k_pci_sleep(ar); + if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) { + ath10k_warn(ar, "refusing cold reset as requested\n"); + return -EPERM; + } + + ret = ath10k_pci_cold_reset(ar); + if (ret) { + ath10k_warn(ar, "failed to cold reset: %d\n", ret); + return ret; + } + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) { + ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", + ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n"); + + return 0; } -static const struct ath10k_hif_ops ath10k_pci_hif_ops = { - .send_head = ath10k_pci_hif_send_head, - .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, - .start = ath10k_pci_hif_start, - .stop = ath10k_pci_hif_stop, - .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, - .get_default_pipe = ath10k_pci_hif_get_default_pipe, - .send_complete_check = ath10k_pci_hif_send_complete_check, - .init = ath10k_pci_hif_post_init, - .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, -}; +static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n"); + + /* FIXME: QCA6174 requires cold + warm reset to work. */ + + ret = ath10k_pci_cold_reset(ar); + if (ret) { + ath10k_warn(ar, "failed to cold reset: %d\n", ret); + return ret; + } + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) { + ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", + ret); + return ret; + } + + ret = ath10k_pci_warm_reset(ar); + if (ret) { + ath10k_warn(ar, "failed to warm reset: %d\n", ret); + return ret; + } -static void ath10k_pci_ce_tasklet(unsigned long ptr) + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n"); + + return 0; +} + +static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) { - struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr; - struct ath10k_pci *ar_pci = pipe->ar_pci; + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n"); - ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); + ret = ath10k_pci_cold_reset(ar); + if (ret) { + ath10k_warn(ar, "failed to cold reset: %d\n", ret); + return ret; + } + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) { + ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", + ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n"); + + return 0; } -static void ath10k_msi_err_tasklet(unsigned long data) +static int ath10k_pci_chip_reset(struct ath10k *ar) { - struct ath10k *ar = (struct ath10k *)data; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ath10k_pci_fw_interrupt_handler(ar); + if (WARN_ON(!ar_pci->pci_hard_reset)) + return -EOPNOTSUPP; + + return ar_pci->pci_hard_reset(ar); } -/* - * Handler for a per-engine interrupt on a PARTICULAR CE. - * This is used in cases where each CE has a private MSI interrupt. 
- */ -static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg) +static int ath10k_pci_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) { - struct ath10k *ar = arg; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; + int ret; - if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { - ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id); - return IRQ_HANDLED; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); + + pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, + &ar_pci->link_ctl); + pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC); /* - * NOTE: We are able to derive ce_id from irq because we - * use a one-to-one mapping for CE's 0..5. - * CE's 6 & 7 do not use interrupts at all. + * Bring the target up cleanly. * - * This mapping must be kept in sync with the mapping - * used by firmware. + * The target may be in an undefined state with an AUX-powered Target + * and a Host in WoW mode. If the Host crashes, loses power, or is + * restarted (without unloading the driver) then the Target is left + * (aux) powered and running. On a subsequent driver load, the Target + * is in an unexpected state. We try to catch that here in order to + * reset the Target and retry the probe. */ - tasklet_schedule(&ar_pci->pipe_info[ce_id].intr); - return IRQ_HANDLED; + ret = ath10k_pci_chip_reset(ar); + if (ret) { + if (ath10k_pci_has_fw_crashed(ar)) { + ath10k_warn(ar, "firmware crashed during chip reset\n"); + ath10k_pci_fw_crashed_clear(ar); + ath10k_pci_fw_crashed_dump(ar); + } + + ath10k_err(ar, "failed to reset chip: %d\n", ret); + goto err_sleep; + } + + ret = ath10k_pci_init_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to initialize CE: %d\n", ret); + goto err_sleep; + } + + ret = ath10k_pci_init_config(ar); + if (ret) { + ath10k_err(ar, "failed to setup init config: %d\n", ret); + goto err_ce; + } + + ret = ath10k_pci_wake_target_cpu(ar); + if (ret) { + ath10k_err(ar, "could not wake up target CPU: %d\n", ret); + goto err_ce; + } + + return 0; + +err_ce: + ath10k_pci_ce_deinit(ar); + +err_sleep: + return ret; } -static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg) +void ath10k_pci_hif_power_down(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); + + /* Currently hif_power_up performs effectively a reset and hif_stop + * resets the chip as well so there's no point in resetting here. + */ +} + +static int ath10k_pci_hif_suspend(struct ath10k *ar) +{ + /* Nothing to do; the important stuff is in the driver suspend. */ + return 0; +} + +static int ath10k_pci_suspend(struct ath10k *ar) +{ + /* The grace timer can still be counting down and ar->ps_awake be true. + * It is known that the device may be asleep after resuming regardless + * of the SoC powersave state before suspending. Hence make sure the + * device is asleep before proceeding. + */ + ath10k_pci_sleep_sync(ar); + + return 0; +} + +static int ath10k_pci_hif_resume(struct ath10k *ar) +{ + /* Nothing to do; the important stuff is in the driver resume. 
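/* Editor's note: a compact sketch of the power-up ordering used above
 * (chip reset -> CE pipes -> target config -> wake CPU) with the kernel's
 * usual goto-based unwind. All function names here are stand-ins for the
 * driver's symbols, with trivial stubs so the sketch compiles.
 */
static int chip_reset(void)           { return 0; }  /* stubs for illustration */
static int init_copy_engines(void)    { return 0; }
static int write_target_config(void)  { return 0; }
static int wake_target_cpu(void)      { return 0; }
static void deinit_copy_engines(void) { }

static int power_up_sketch(void)
{
	int ret;

	ret = chip_reset();              /* must precede any CE access */
	if (ret)
		goto err;
	ret = init_copy_engines();
	if (ret)
		goto err;
	ret = write_target_config();     /* needs a working CE diag window */
	if (ret)
		goto err_ce;
	ret = wake_target_cpu();
	if (ret)
		goto err_ce;
	return 0;

err_ce:
	deinit_copy_engines();           /* undo only what actually succeeded */
err:
	return ret;
}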
*/ + return 0; +} + +static int ath10k_pci_resume(struct ath10k *ar) { - struct ath10k *ar = arg; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct pci_dev *pdev = ar_pci->pdev; + u32 val; + int ret = 0; - tasklet_schedule(&ar_pci->msi_fw_err); - return IRQ_HANDLED; + ret = ath10k_pci_force_wake(ar); + if (ret) { + ath10k_err(ar, "failed to wake up target: %d\n", ret); + return ret; + } + + /* Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries + * from interfering with C3 CPU state. pci_restore_state won't help + * here since it only restores the first 64 bytes pci config header. + */ + pci_read_config_dword(pdev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + + return ret; +} + +static bool ath10k_pci_validate_cal(void *data, size_t size) +{ + __le16 *cal_words = data; + u16 checksum = 0; + size_t i; + + if (size % 2 != 0) + return false; + + for (i = 0; i < size / 2; i++) + checksum ^= le16_to_cpu(cal_words[i]); + + return checksum == 0xffff; +} + +static void ath10k_pci_enable_eeprom(struct ath10k *ar) +{ + /* Enable SI clock */ + ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0); + + /* Configure GPIOs for I2C operation */ + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + + 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN, + SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG, + GPIO_PIN0_CONFIG) | + SM(1, GPIO_PIN0_PAD_PULL)); + + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + + 4 * QCA9887_1_0_SI_CLK_GPIO_PIN, + SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) | + SM(1, GPIO_PIN0_PAD_PULL)); + + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + + QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS, + 1u << QCA9887_1_0_SI_CLK_GPIO_PIN); + + /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */ + ath10k_pci_write32(ar, + SI_BASE_ADDRESS + SI_CONFIG_OFFSET, + SM(1, SI_CONFIG_ERR_INT) | + SM(1, SI_CONFIG_BIDIR_OD_DATA) | + SM(1, SI_CONFIG_I2C) | + SM(1, SI_CONFIG_POS_SAMPLE) | + SM(1, SI_CONFIG_INACTIVE_DATA) | + SM(1, SI_CONFIG_INACTIVE_CLK) | + SM(8, SI_CONFIG_DIVIDER)); } +static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out) +{ + u32 reg; + int wait_limit; + + /* set device select byte and for the read operation */ + reg = QCA9887_EEPROM_SELECT_READ | + SM(addr, QCA9887_EEPROM_ADDR_LO) | + SM(addr >> 8, QCA9887_EEPROM_ADDR_HI); + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg); + + /* write transmit data, transfer length, and START bit */ + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, + SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) | + SM(4, SI_CS_TX_CNT)); + + /* wait max 1 sec */ + wait_limit = 100000; + + /* wait for SI_CS_DONE_INT */ + do { + reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET); + if (MS(reg, SI_CS_DONE_INT)) + break; + + wait_limit--; + udelay(10); + } while (wait_limit > 0); + + if (!MS(reg, SI_CS_DONE_INT)) { + ath10k_err(ar, "timeout while reading device EEPROM at %04x\n", + addr); + return -ETIMEDOUT; + } + + /* clear SI_CS_DONE_INT */ + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg); + + if (MS(reg, SI_CS_DONE_ERR)) { + ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr); + return -EIO; + } + + /* extract receive data */ + reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET); + *out = reg; + + return 0; +} + +static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data, + size_t *data_len) +{ + u8 *caldata = NULL; 
+ size_t calsize, i; + int ret; + + if (!QCA_REV_9887(ar)) + return -EOPNOTSUPP; + + calsize = ar->hw_params.cal_data_len; + caldata = kmalloc(calsize, GFP_KERNEL); + if (!caldata) + return -ENOMEM; + + ath10k_pci_enable_eeprom(ar); + + for (i = 0; i < calsize; i++) { + ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]); + if (ret) + goto err_free; + } + + if (!ath10k_pci_validate_cal(caldata, calsize)) + goto err_free; + + *data = caldata; + *data_len = calsize; + + return 0; + +err_free: + kfree(caldata); + + return -EINVAL; +} + +static const struct ath10k_hif_ops ath10k_pci_hif_ops = { + .tx_sg = ath10k_pci_hif_tx_sg, + .diag_read = ath10k_pci_hif_diag_read, + .diag_write = ath10k_pci_diag_write_mem, + .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, + .start = ath10k_pci_hif_start, + .stop = ath10k_pci_hif_stop, + .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, + .get_default_pipe = ath10k_pci_hif_get_default_pipe, + .send_complete_check = ath10k_pci_hif_send_complete_check, + .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, + .power_up = ath10k_pci_hif_power_up, + .power_down = ath10k_pci_hif_power_down, + .read32 = ath10k_pci_read32, + .write32 = ath10k_pci_write32, + .suspend = ath10k_pci_hif_suspend, + .resume = ath10k_pci_hif_resume, + .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom, +}; + /* * Top-level interrupt handler for all PCI interrupts from a Target. * When a block of MSI interrupts is allocated, this top-level handler @@ -1807,510 +3102,632 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) { struct ath10k *ar = arg; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; - if (ar_pci->num_msi_intrs == 0) { - /* - * IMPORTANT: INTR_CLR regiser has to be set after - * INTR_ENABLE is set to 0, otherwise interrupt can not be - * really cleared. - */ - iowrite32(0, ar_pci->mem + - (SOC_CORE_BASE_ADDRESS | - PCIE_INTR_ENABLE_ADDRESS)); - iowrite32(PCIE_INTR_FIRMWARE_MASK | - PCIE_INTR_CE_MASK_ALL, - ar_pci->mem + (SOC_CORE_BASE_ADDRESS | - PCIE_INTR_CLR_ADDRESS)); - /* - * IMPORTANT: this extra read transaction is required to - * flush the posted write buffer. 
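/* Editor's note: ath10k_pci_validate_cal() above accepts a calibration blob
 * only if the XOR of all 16-bit words equals 0xffff. A host-order standalone
 * version plus a usage example; simplified in that it skips the le16_to_cpu
 * conversion a big-endian host would need.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool cal_blob_valid(const uint16_t *words, size_t nbytes)
{
	uint16_t csum = 0;
	size_t i;

	if (nbytes % 2 != 0)
		return false;            /* must be an even number of bytes */
	for (i = 0; i < nbytes / 2; i++)
		csum ^= words[i];
	return csum == 0xffff;           /* all-ones XOR marks a valid blob */
}

int main(void)
{
	/* last word chosen so the whole blob XORs to 0xffff */
	uint16_t blob[] = { 0x1234, 0xabcd, 0x1234 ^ 0xabcd ^ 0xffff };

	return cal_blob_valid(blob, sizeof(blob)) ? 0 : 1;  /* returns 0 */
}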
- */ - (void) ioread32(ar_pci->mem + - (SOC_CORE_BASE_ADDRESS | - PCIE_INTR_ENABLE_ADDRESS)); + if (ath10k_pci_has_device_gone(ar)) + return IRQ_NONE; + + ret = ath10k_pci_force_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret); + return IRQ_NONE; } - tasklet_schedule(&ar_pci->intr_tq); + if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) && + !ath10k_pci_irq_pending(ar)) + return IRQ_NONE; + + ath10k_pci_disable_and_clear_intx_irq(ar); + ath10k_pci_irq_msi_fw_mask(ar); + napi_schedule(&ar->napi); return IRQ_HANDLED; } -static void ath10k_pci_tasklet(unsigned long data) +static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) { - struct ath10k *ar = (struct ath10k *)data; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k *ar = container_of(ctx, struct ath10k, napi); + int done = 0; + + if (ath10k_pci_has_fw_crashed(ar)) { + ath10k_pci_fw_crashed_clear(ar); + ath10k_pci_fw_crashed_dump(ar); + napi_complete(ctx); + return done; + } - ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */ ath10k_ce_per_engine_service_any(ar); - if (ar_pci->num_msi_intrs == 0) { - /* Enable Legacy PCI line interrupts */ - iowrite32(PCIE_INTR_FIRMWARE_MASK | - PCIE_INTR_CE_MASK_ALL, - ar_pci->mem + (SOC_CORE_BASE_ADDRESS | - PCIE_INTR_ENABLE_ADDRESS)); - /* - * IMPORTANT: this extra read transaction is required to - * flush the posted write buffer + done = ath10k_htt_txrx_compl_task(ar, budget); + + if (done < budget) { + napi_complete_done(ctx, done); + /* In case of MSI, it is possible that interrupts are received + * while NAPI poll is in progress. So pending interrupts that are + * received after processing all copy engine pipes by NAPI poll + * will not be handled again. This was causing the boot sequence + * to fail to complete on x86 platforms. So before enabling + * interrupts it is safer to check for pending interrupts that + * need immediate servicing.
*/ - (void) ioread32(ar_pci->mem + - (SOC_CORE_BASE_ADDRESS | - PCIE_INTR_ENABLE_ADDRESS)); + if (ath10k_ce_interrupt_summary(ar)) { + napi_schedule(ctx); + goto out; + } + ath10k_pci_enable_intx_irq(ar); + ath10k_pci_irq_msi_fw_unmask(ar); } + +out: + return done; } -static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num) +static int ath10k_pci_request_irq_msi(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; - int i; - - ret = pci_enable_msi_block(ar_pci->pdev, num); - if (ret) - return ret; - ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, - ath10k_pci_msi_fw_handler, + ret = request_irq(ar_pci->pdev->irq, + ath10k_pci_interrupt_handler, IRQF_SHARED, "ath10k_pci", ar); - if (ret) + if (ret) { + ath10k_warn(ar, "failed to request MSI irq %d: %d\n", + ar_pci->pdev->irq, ret); return ret; - - for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) { - ret = request_irq(ar_pci->pdev->irq + i, - ath10k_pci_per_engine_handler, - IRQF_SHARED, "ath10k_pci", ar); - if (ret) { - ath10k_warn("request_irq(%d) failed %d\n", - ar_pci->pdev->irq + i, ret); - - for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--) - free_irq(ar_pci->pdev->irq + i, ar); - - free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar); - pci_disable_msi(ar_pci->pdev); - return ret; - } } - ath10k_info("MSI-X interrupt handling (%d intrs)\n", num); return 0; } -static int ath10k_pci_start_intr_msi(struct ath10k *ar) +static int ath10k_pci_request_irq_intx(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; - ret = pci_enable_msi(ar_pci->pdev); - if (ret < 0) - return ret; - ret = request_irq(ar_pci->pdev->irq, ath10k_pci_interrupt_handler, IRQF_SHARED, "ath10k_pci", ar); - if (ret < 0) { - pci_disable_msi(ar_pci->pdev); + if (ret) { + ath10k_warn(ar, "failed to request legacy irq %d: %d\n", + ar_pci->pdev->irq, ret); return ret; } - ath10k_info("MSI interrupt handling\n"); return 0; } -static int ath10k_pci_start_intr_legacy(struct ath10k *ar) +static int ath10k_pci_request_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret; - - ret = request_irq(ar_pci->pdev->irq, - ath10k_pci_interrupt_handler, - IRQF_SHARED, "ath10k_pci", ar); - if (ret < 0) - return ret; - /* - * Make sure to wake the Target before enabling Legacy - * Interrupt. - */ - iowrite32(PCIE_SOC_WAKE_V_MASK, - ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); + switch (ar_pci->oper_irq_mode) { + case ATH10K_PCI_IRQ_INTX: + return ath10k_pci_request_irq_intx(ar); + case ATH10K_PCI_IRQ_MSI: + return ath10k_pci_request_irq_msi(ar); + default: + return -EINVAL; + } +} - ath10k_pci_wait(ar); +static void ath10k_pci_free_irq(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - /* - * A potential race occurs here: The CORE_BASE write - * depends on target correctly decoding AXI address but - * host won't know when target writes BAR to CORE_CTRL. - * This write might get lost if target has NOT written BAR. - * For now, fix the race by repeating the write in below - * synchronization checking. 
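/* Editor's note: a bus-agnostic sketch of the poll pattern described in the
 * comment above: consume up to `budget` units of work, and only re-enable
 * interrupts after a final check that nothing new arrived in the window
 * between "work drained" and "irq unmasked". All names are illustrative.
 */
#include <stdbool.h>

struct pollctx {
	int (*do_work)(void *dev, int budget);   /* returns work completed */
	bool (*work_pending)(void *dev);
	void (*irq_enable)(void *dev);
	void (*reschedule)(void *dev);
	void *dev;
};

static int poll_sketch(struct pollctx *c, int budget)
{
	int done = c->do_work(c->dev, budget);

	if (done < budget) {                  /* quota not exhausted: idle */
		if (c->work_pending(c->dev))  /* closes the MSI race window */
			c->reschedule(c->dev);
		else
			c->irq_enable(c->dev);
	}
	return done;                          /* done == budget keeps polling */
}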
- */ - iowrite32(PCIE_INTR_FIRMWARE_MASK | - PCIE_INTR_CE_MASK_ALL, - ar_pci->mem + (SOC_CORE_BASE_ADDRESS | - PCIE_INTR_ENABLE_ADDRESS)); - iowrite32(PCIE_SOC_WAKE_RESET, - ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); + free_irq(ar_pci->pdev->irq, ar); +} - ath10k_info("legacy interrupt handling\n"); - return 0; +void ath10k_pci_init_napi(struct ath10k *ar) +{ + netif_napi_add(ar->napi_dev, &ar->napi, ath10k_pci_napi_poll); } -static int ath10k_pci_start_intr(struct ath10k *ar) +static int ath10k_pci_init_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int num = MSI_NUM_REQUEST; int ret; - int i; - - tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar); - tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, - (unsigned long) ar); - for (i = 0; i < CE_COUNT; i++) { - ar_pci->pipe_info[i].ar_pci = ar_pci; - tasklet_init(&ar_pci->pipe_info[i].intr, - ath10k_pci_ce_tasklet, - (unsigned long)&ar_pci->pipe_info[i]); - } + ath10k_pci_init_napi(ar); - if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features)) - num = 1; + if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) + ath10k_info(ar, "limiting irq mode to: %d\n", + ath10k_pci_irq_mode); - if (num > 1) { - ret = ath10k_pci_start_intr_msix(ar, num); + /* Try MSI */ + if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_INTX) { + ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI; + ret = pci_enable_msi(ar_pci->pdev); if (ret == 0) - goto exit; + return 0; - ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret); - num = 1; + /* MSI failed, try legacy irq next */ } - if (num == 1) { - ret = ath10k_pci_start_intr_msi(ar); - if (ret == 0) - goto exit; + /* Try legacy irq + * + * A potential race occurs here: The CORE_BASE write + * depends on target correctly decoding AXI address but + * host won't know when target writes BAR to CORE_CTRL. + * This write might get lost if target has NOT written BAR. + * For now, fix the race by repeating the write in below + * synchronization checking. + */ + ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX; - ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n", - ret); - num = 0; - } + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); - ret = ath10k_pci_start_intr_legacy(ar); + return 0; +} -exit: - ar_pci->num_msi_intrs = num; - ar_pci->ce_count = CE_COUNT; - return ret; +static void ath10k_pci_deinit_irq_intx(struct ath10k *ar) +{ + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, + 0); } -static void ath10k_pci_stop_intr(struct ath10k *ar) +static int ath10k_pci_deinit_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; - /* There's at least one interrupt irregardless whether its legacy INTR - * or MSI or MSI-X */ - for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) - free_irq(ar_pci->pdev->irq + i, ar); - - if (ar_pci->num_msi_intrs > 0) + switch (ar_pci->oper_irq_mode) { + case ATH10K_PCI_IRQ_INTX: + ath10k_pci_deinit_irq_intx(ar); + break; + default: pci_disable_msi(ar_pci->pdev); + break; + } + + return 0; } -static int ath10k_pci_reset_target(struct ath10k *ar) +int ath10k_pci_wait_for_target_init(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int wait_limit = 300; /* 3 sec */ + unsigned long timeout; + u32 val; - /* Wait for Target to finish initialization before we proceed.
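/* Editor's note: a minimal sketch of the fallback policy implemented by
 * ath10k_pci_init_irq() above: prefer MSI, drop to legacy INTx when MSI is
 * unavailable or explicitly disabled. try_enable_msi() is a stand-in for
 * pci_enable_msi(), not a real API.
 */
#include <stdbool.h>

enum irq_mode { IRQ_MODE_INTX, IRQ_MODE_MSI };

static int try_enable_msi(void) { return -1; }  /* stub: pretend MSI failed */

static enum irq_mode pick_irq_mode(bool force_intx)
{
	if (!force_intx && try_enable_msi() == 0)
		return IRQ_MODE_MSI;
	/* legacy line interrupts are always available on conventional PCI */
	return IRQ_MODE_INTX;
}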
*/ - iowrite32(PCIE_SOC_WAKE_V_MASK, - ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); + + timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); + + do { + val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n", + val); - ath10k_pci_wait(ar); + /* target should never return this */ + if (val == 0xffffffff) + continue; + + /* the device has crashed so don't bother trying anymore */ + if (val & FW_IND_EVENT_PENDING) + break; - while (wait_limit-- && - !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) & - FW_IND_INITIALIZED)) { - if (ar_pci->num_msi_intrs == 0) + if (val & FW_IND_INITIALIZED) + break; + + if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) /* Fix potential race by repeating CORE_BASE writes */ - iowrite32(PCIE_INTR_FIRMWARE_MASK | - PCIE_INTR_CE_MASK_ALL, - ar_pci->mem + (SOC_CORE_BASE_ADDRESS | - PCIE_INTR_ENABLE_ADDRESS)); + ath10k_pci_enable_intx_irq(ar); + mdelay(10); - } + } while (time_before(jiffies, timeout)); - if (wait_limit < 0) { - ath10k_err("Target stalled\n"); - iowrite32(PCIE_SOC_WAKE_RESET, - ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); + ath10k_pci_disable_and_clear_intx_irq(ar); + ath10k_pci_irq_msi_fw_mask(ar); + + if (val == 0xffffffff) { + ath10k_err(ar, "failed to read device register, device is gone\n"); return -EIO; } - iowrite32(PCIE_SOC_WAKE_RESET, - ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); + if (val & FW_IND_EVENT_PENDING) { + ath10k_warn(ar, "device has crashed during init\n"); + return -ECOMM; + } + + if (!(val & FW_IND_INITIALIZED)) { + ath10k_err(ar, "failed to receive initialized event from target: %08x\n", + val); + return -ETIMEDOUT; + } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n"); return 0; } -static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci) +static int ath10k_pci_cold_reset(struct ath10k *ar) { - struct ath10k *ar = ar_pci->ar; - void __iomem *mem = ar_pci->mem; - int i; u32 val; - if (!SOC_GLOBAL_RESET_ADDRESS) - return; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); - if (!mem) - return; + spin_lock_bh(&ar->data_lock); - ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, - PCIE_SOC_WAKE_V_MASK); - for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { - if (ath10k_pci_target_is_awake(ar)) - break; - msleep(1); - } + ar->stats.fw_cold_reset_counter++; + + spin_unlock_bh(&ar->data_lock); /* Put Target, including PCIe, into RESET. */ - val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS); + val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); val |= 1; - ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); + ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); - for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { - if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & - RTC_STATE_COLD_RESET_MASK) - break; - msleep(1); - } + /* After writing into SOC_GLOBAL_RESET to put device into + * reset and pulling out of reset pcie may not be stable + * for any immediate pcie register access and cause bus error, + * add delay before any pcie access request to fix this issue. + */ + msleep(20); /* Pull Target, including PCIe, out of RESET. 
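/* Editor's note: the wait loop above is a classic bounded poll: re-read a
 * status register until a terminal bit appears or a deadline passes. A
 * userspace-flavoured sketch using a monotonic millisecond clock
 * (clock_gettime is POSIX); the register read is a stub.
 */
#include <stdint.h>
#include <time.h>

static uint32_t read_status(void) { return 0; }  /* stub register read */

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int wait_for_bit(uint32_t bit, long timeout_ms)
{
	long deadline = now_ms() + timeout_ms;

	do {
		if (read_status() & bit)
			return 0;        /* terminal condition observed */
		/* a real loop would sleep ~10ms here, like mdelay(10) above */
	} while (now_ms() < deadline);

	return -1;                       /* caller maps this to -ETIMEDOUT */
}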
*/ val &= ~1; - ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); + ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); - for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { - if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & - RTC_STATE_COLD_RESET_MASK)) - break; - msleep(1); + msleep(20); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); + + return 0; +} + +static int ath10k_pci_claim(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct pci_dev *pdev = ar_pci->pdev; + int ret; + + pci_set_drvdata(pdev, ar); + + ret = pci_enable_device(pdev); + if (ret) { + ath10k_err(ar, "failed to enable pci device: %d\n", ret); + return ret; + } + + ret = pci_request_region(pdev, BAR_NUM, "ath"); + if (ret) { + ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM, + ret); + goto err_device; + } + + /* Target expects 32 bit DMA. Enforce it. */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); + goto err_region; + } + + pci_set_master(pdev); + + /* Arrange for access to Target SoC registers. */ + ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); + ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); + if (!ar_pci->mem) { + ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); + ret = -EIO; + goto err_region; } - ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); + return 0; + +err_region: + pci_release_region(pdev, BAR_NUM); + +err_device: + pci_disable_device(pdev); + + return ret; +} + +static void ath10k_pci_release(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct pci_dev *pdev = ar_pci->pdev; + + pci_iounmap(pdev, ar_pci->mem); + pci_release_region(pdev, BAR_NUM); + pci_disable_device(pdev); } -static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) +static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) { + const struct ath10k_pci_supp_chip *supp_chip; int i; + u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV); - for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) { - if (!test_bit(i, ar_pci->features)) - continue; + for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) { + supp_chip = &ath10k_pci_supp_chips[i]; - switch (i) { - case ATH10K_PCI_FEATURE_MSI_X: - ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n"); - break; - case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND: - ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n"); - break; - } + if (supp_chip->dev_id == dev_id && + supp_chip->rev_id == rev_id) + return true; + } + + return false; +} + +int ath10k_pci_setup_resource(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + int ret; + + spin_lock_init(&ce->ce_lock); + spin_lock_init(&ar_pci->ps_lock); + mutex_init(&ar_pci->ce_diag_mutex); + + INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work); + + timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); + + ar_pci->attr = kmemdup(pci_host_ce_config_wlan, + sizeof(pci_host_ce_config_wlan), + GFP_KERNEL); + if (!ar_pci->attr) + return -ENOMEM; + + ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, + sizeof(pci_target_ce_config_wlan), + GFP_KERNEL); + if (!ar_pci->pipe_config) { + ret = -ENOMEM; + goto err_free_attr; + } + + ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, + sizeof(pci_target_service_to_ce_map_wlan), + GFP_KERNEL); + if 
(!ar_pci->serv_to_pipe) { + ret = -ENOMEM; + goto err_free_pipe_config; + } + + if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) + ath10k_pci_override_ce_config(ar); + + ret = ath10k_pci_alloc_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", + ret); + goto err_free_serv_to_pipe; } + + return 0; + +err_free_serv_to_pipe: + kfree(ar_pci->serv_to_pipe); +err_free_pipe_config: + kfree(ar_pci->pipe_config); +err_free_attr: + kfree(ar_pci->attr); + return ret; +} + +void ath10k_pci_release_resource(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + ath10k_pci_rx_retry_sync(ar); + netif_napi_del(&ar->napi); + ath10k_pci_ce_deinit(ar); + ath10k_pci_free_pipes(ar); + kfree(ar_pci->attr); + kfree(ar_pci->pipe_config); + kfree(ar_pci->serv_to_pipe); } +static const struct ath10k_bus_ops ath10k_pci_bus_ops = { + .read32 = ath10k_bus_pci_read32, + .write32 = ath10k_bus_pci_write32, + .get_num_banks = ath10k_pci_get_num_banks, +}; + static int ath10k_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_dev) { - void __iomem *mem; int ret = 0; struct ath10k *ar; struct ath10k_pci *ar_pci; - u32 lcr_val; - - ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); - - ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL); - if (ar_pci == NULL) - return -ENOMEM; - - ar_pci->pdev = pdev; - ar_pci->dev = &pdev->dev; + enum ath10k_hw_rev hw_rev; + struct ath10k_bus_params bus_params = {}; + bool pci_ps, is_qca988x = false; + int (*pci_soft_reset)(struct ath10k *ar); + int (*pci_hard_reset)(struct ath10k *ar); + u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); switch (pci_dev->device) { - case QCA988X_1_0_DEVICE_ID: - set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features); - break; + case QCA988X_2_0_DEVICE_ID_UBNT: case QCA988X_2_0_DEVICE_ID: - set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features); + hw_rev = ATH10K_HW_QCA988X; + pci_ps = false; + is_qca988x = true; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca988x_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; + break; + case QCA9887_1_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA9887; + pci_ps = false; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca988x_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; + break; + case QCA6164_2_1_DEVICE_ID: + case QCA6174_2_1_DEVICE_ID: + hw_rev = ATH10K_HW_QCA6174; + pci_ps = true; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca6174_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; + break; + case QCA99X0_2_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA99X0; + pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = ath10k_pci_qca99x0_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; + break; + case QCA9984_1_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA9984; + pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = ath10k_pci_qca99x0_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; + break; + case QCA9888_2_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA9888; + pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = ath10k_pci_qca99x0_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; + break; + case QCA9377_1_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA9377; + pci_ps = true; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = 
ath10k_pci_qca6174_chip_reset; + targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; break; default: - ret = -ENODEV; - ath10k_err("Unkown device ID: %d\n", pci_dev->device); - goto err_ar_pci; + WARN_ON(1); + return -EOPNOTSUPP; } - ath10k_pci_dump_features(ar_pci); - - ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI, - &ath10k_pci_hif_ops); + ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, + hw_rev, &ath10k_pci_hif_ops); if (!ar) { - ath10k_err("ath10k_core_create failed!\n"); - ret = -EINVAL; - goto err_ar_pci; + dev_err(&pdev->dev, "failed to allocate core\n"); + return -ENOMEM; } - /* Enable QCA988X_1.0 HW workarounds */ - if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) - spin_lock_init(&ar_pci->hw_v1_workaround_lock); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", + pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); + ar_pci = ath10k_pci_priv(ar); + ar_pci->pdev = pdev; + ar_pci->dev = &pdev->dev; ar_pci->ar = ar; - ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS; - atomic_set(&ar_pci->keep_awake_count, 0); - - pci_set_drvdata(pdev, ar); - - /* - * Without any knowledge of the Host, the Target may have been reset or - * power cycled and its Config Space may no longer reflect the PCI - * address space that was assigned earlier by the PCI infrastructure. - * Refresh it now. - */ - ret = pci_assign_resource(pdev, BAR_NUM); + ar->dev_id = pci_dev->device; + ar_pci->pci_ps = pci_ps; + ar_pci->ce.bus_ops = &ath10k_pci_bus_ops; + ar_pci->pci_soft_reset = pci_soft_reset; + ar_pci->pci_hard_reset = pci_hard_reset; + ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr; + ar->ce_priv = &ar_pci->ce; + + ar->id.vendor = pdev->vendor; + ar->id.device = pdev->device; + ar->id.subsystem_vendor = pdev->subsystem_vendor; + ar->id.subsystem_device = pdev->subsystem_device; + + timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0); + + ret = ath10k_pci_setup_resource(ar); if (ret) { - ath10k_err("cannot assign PCI space: %d\n", ret); - goto err_ar; + ath10k_err(ar, "failed to setup resource: %d\n", ret); + goto err_core_destroy; } - ret = pci_enable_device(pdev); + ret = ath10k_pci_claim(ar); if (ret) { - ath10k_err("cannot enable PCI device: %d\n", ret); - goto err_ar; + ath10k_err(ar, "failed to claim device: %d\n", ret); + goto err_free_pipes; } - /* Request MMIO resources */ - ret = pci_request_region(pdev, BAR_NUM, "ath"); + ret = ath10k_pci_force_wake(ar); if (ret) { - ath10k_err("PCI MMIO reservation error: %d\n", ret); - goto err_device; + ath10k_warn(ar, "failed to wake up device : %d\n", ret); + goto err_sleep; } - /* - * Target structures have a limit of 32 bit DMA pointers. - * DMA pointers can be wider than 32 bits by default on some systems. 
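/* Editor's note: the probe switch above binds per-chip reset callbacks at
 * runtime. The same dispatch is often written as a lookup table; a sketch
 * of that alternative, where the device ids and handlers are placeholders
 * rather than the driver's real values.
 */
#include <stddef.h>
#include <stdint.h>

struct chip_ops {
	uint16_t dev_id;
	int (*soft_reset)(void);
	int (*hard_reset)(void);
};

static int warm_reset_stub(void) { return 0; }
static int cold_reset_stub(void) { return 0; }

static const struct chip_ops chip_ops_table[] = {
	{ 0x003c, warm_reset_stub, cold_reset_stub },  /* placeholder ids */
	{ 0x003e, warm_reset_stub, cold_reset_stub },
};

static const struct chip_ops *lookup_chip_ops(uint16_t dev_id)
{
	size_t i;

	for (i = 0; i < sizeof(chip_ops_table) / sizeof(chip_ops_table[0]); i++)
		if (chip_ops_table[i].dev_id == dev_id)
			return &chip_ops_table[i];
	return NULL;  /* unsupported device: probe should fail cleanly */
}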
- */ - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ath10k_pci_ce_deinit(ar); + ath10k_pci_irq_disable(ar); + + ret = ath10k_pci_init_irq(ar); if (ret) { - ath10k_err("32-bit DMA not available: %d\n", ret); - goto err_region; + ath10k_err(ar, "failed to init irqs: %d\n", ret); + goto err_sleep; } - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n", + ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode, + ath10k_pci_irq_mode, ath10k_pci_reset_mode); + + ret = ath10k_pci_request_irq(ar); if (ret) { - ath10k_err("cannot enable 32-bit consistent DMA\n"); - goto err_region; + ath10k_warn(ar, "failed to request irqs: %d\n", ret); + goto err_deinit_irq; } - /* Set bus master bit in PCI_COMMAND to enable DMA */ - pci_set_master(pdev); - - /* - * Temporary FIX: disable ASPM - * Will be removed after the OTP is programmed + bus_params.dev_type = ATH10K_DEV_TYPE_LL; + bus_params.link_can_suspend = true; + /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that + * fall off the bus during chip_reset. These chips have the same pci + * device id as the QCA9880 BR4A or 2R4E. So that's why the check. */ - pci_read_config_dword(pdev, 0x80, &lcr_val); - pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00)); - - /* Arrange for access to Target SoC registers. */ - mem = pci_iomap(pdev, BAR_NUM, 0); - if (!mem) { - ath10k_err("PCI iomap error\n"); - ret = -EIO; - goto err_master; + if (is_qca988x) { + bus_params.chip_id = + ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + if (bus_params.chip_id != 0xffffffff) { + if (!ath10k_pci_chip_is_supported(pdev->device, + bus_params.chip_id)) { + ret = -ENODEV; + goto err_unsupported; + } + } } - ar_pci->mem = mem; - - spin_lock_init(&ar_pci->ce_lock); - - ar_pci->cacheline_sz = dma_get_cache_alignment(); - - ret = ath10k_pci_start_intr(ar); + ret = ath10k_pci_chip_reset(ar); if (ret) { - ath10k_err("could not start interrupt handling (%d)\n", ret); - goto err_iomap; + ath10k_err(ar, "failed to reset chip: %d\n", ret); + goto err_free_irq; } - /* - * Bring the target up cleanly. - * - * The target may be in an undefined state with an AUX-powered Target - * and a Host in WoW mode. If the Host crashes, loses power, or is - * restarted (without unloading the driver) then the Target is left - * (aux) powered and running. On a subsequent driver load, the Target - * is in an unexpected state. We try to catch that here in order to - * reset the Target and retry the probe. 
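/* Editor's note: a sketch of the chip_id sanity check used during probe:
 * mask out the revision field and search a (device id, revision) allow-list.
 * The shift/mask and table entries below are illustrative, not the driver's
 * SOC_CHIP_ID_REV definition or supported-chip list.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define CHIP_ID_REV_MASK 0x00000f00u  /* placeholder for SOC_CHIP_ID_REV mask */
#define CHIP_ID_REV_LSB  8

struct supp_chip { uint16_t dev_id; uint32_t rev_id; };

static const struct supp_chip supp[] = {
	{ 0x003c, 0x2 },  /* hypothetical entries */
	{ 0x003c, 0x3 },
};

static bool chip_supported(uint16_t dev_id, uint32_t chip_id)
{
	uint32_t rev = (chip_id & CHIP_ID_REV_MASK) >> CHIP_ID_REV_LSB;
	size_t i;

	if (chip_id == 0xffffffff)
		return false;  /* register read failed; device likely gone */
	for (i = 0; i < sizeof(supp) / sizeof(supp[0]); i++)
		if (supp[i].dev_id == dev_id && supp[i].rev_id == rev)
			return true;
	return false;
}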
- */ - ath10k_pci_device_reset(ar_pci); - - ret = ath10k_pci_reset_target(ar); - if (ret) - goto err_intr; - - if (ath10k_target_ps) { - ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n"); - } else { - /* Force AWAKE forever */ - ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n"); - ath10k_do_pci_wake(ar); + bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + if (bus_params.chip_id == 0xffffffff) { + ret = -ENODEV; + goto err_unsupported; } - ret = ath10k_pci_ce_init(ar); - if (ret) - goto err_intr; - - ret = ath10k_pci_init_config(ar); - if (ret) - goto err_ce; - - ret = ath10k_pci_wake_target_cpu(ar); - if (ret) { - ath10k_err("could not wake up target CPU (%d)\n", ret); - goto err_ce; + if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) { + ret = -ENODEV; + goto err_unsupported; } - ret = ath10k_core_register(ar); + ret = ath10k_core_register(ar, &bus_params); if (ret) { - ath10k_err("could not register driver core (%d)\n", ret); - goto err_ce; + ath10k_err(ar, "failed to register driver core: %d\n", ret); + goto err_free_irq; } return 0; -err_ce: - ath10k_pci_ce_deinit(ar); -err_intr: - ath10k_pci_stop_intr(ar); -err_iomap: - pci_iounmap(pdev, mem); -err_master: - pci_clear_master(pdev); -err_region: - pci_release_region(pdev, BAR_NUM); -err_device: - pci_disable_device(pdev); -err_ar: - pci_set_drvdata(pdev, NULL); +err_unsupported: + ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", + pdev->device, bus_params.chip_id); + +err_free_irq: + ath10k_pci_free_irq(ar); + +err_deinit_irq: + ath10k_pci_release_resource(ar); + +err_sleep: + ath10k_pci_sleep_sync(ar); + ath10k_pci_release(ar); + +err_free_pipes: + ath10k_pci_free_pipes(ar); + +err_core_destroy: ath10k_core_destroy(ar); -err_ar_pci: - /* call HIF PCI free here */ - kfree(ar_pci); return ret; } @@ -2318,190 +3735,121 @@ err_ar_pci: static void ath10k_pci_remove(struct pci_dev *pdev) { struct ath10k *ar = pci_get_drvdata(pdev); - struct ath10k_pci *ar_pci; - ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); if (!ar) return; - ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci) - return; - - tasklet_kill(&ar_pci->msi_fw_err); - ath10k_core_unregister(ar); - ath10k_pci_stop_intr(ar); - - pci_set_drvdata(pdev, NULL); - pci_iounmap(pdev, ar_pci->mem); - pci_release_region(pdev, BAR_NUM); - pci_clear_master(pdev); - pci_disable_device(pdev); - + ath10k_pci_free_irq(ar); + ath10k_pci_deinit_irq(ar); + ath10k_pci_release_resource(ar); + ath10k_pci_sleep_sync(ar); + ath10k_pci_release(ar); ath10k_core_destroy(ar); - kfree(ar_pci); } -#if defined(CONFIG_PM_SLEEP) - -#define ATH10K_PCI_PM_CONTROL 0x44 +MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); -static int ath10k_pci_suspend(struct device *device) +static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(device); - struct ath10k *ar = pci_get_drvdata(pdev); - struct ath10k_pci *ar_pci; - u32 val; - int ret, retval; - - ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); - - if (!ar) - return -ENODEV; - - ar_pci = ath10k_pci_priv(ar); - if (!ar_pci) - return -ENODEV; - - if (ath10k_core_target_suspend(ar)) - return -EBUSY; - - ret = wait_event_interruptible_timeout(ar->event_queue, - ar->is_target_paused == true, - 1 * HZ); - if (ret < 0) { - ath10k_warn("suspend interrupted (%d)\n", ret); - retval = ret; - goto resume; - } else if (ret == 0) { - ath10k_warn("suspend timed out - target pause event never came\n"); - retval = EIO; - goto 
resume; - } - - /* - * reset is_target_paused and host can check that in next time, - * or it will always be TRUE and host just skip the waiting - * condition, it causes target assert due to host already - * suspend - */ - ar->is_target_paused = false; - - pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); - - if ((val & 0x000000ff) != 0x3) { - pci_save_state(pdev); - pci_disable_device(pdev); - pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, - (val & 0xffffff00) | 0x03); - } + struct ath10k *ar = dev_get_drvdata(dev); + int ret; - return 0; -resume: - ret = ath10k_core_target_resume(ar); + ret = ath10k_pci_suspend(ar); if (ret) - ath10k_warn("could not resume (%d)\n", ret); + ath10k_warn(ar, "failed to suspend hif: %d\n", ret); - return retval; + return ret; } -static int ath10k_pci_resume(struct device *device) +static __maybe_unused int ath10k_pci_pm_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(device); - struct ath10k *ar = pci_get_drvdata(pdev); - struct ath10k_pci *ar_pci; + struct ath10k *ar = dev_get_drvdata(dev); int ret; - u32 val; - - ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); - - if (!ar) - return -ENODEV; - ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci) - return -ENODEV; - ret = pci_enable_device(pdev); - if (ret) { - ath10k_warn("cannot enable PCI device: %d\n", ret); - return ret; - } - - pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); - - if ((val & 0x000000ff) != 0) { - pci_restore_state(pdev); - pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, - val & 0xffffff00); - /* - * Suspend/Resume resets the PCI configuration space, - * so we have to re-disable the RETRY_TIMEOUT register (0x41) - * to keep PCI Tx retries from interfering with C3 CPU state - */ - pci_read_config_dword(pdev, 0x40, &val); - - if ((val & 0x0000ff00) != 0) - pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - } - - ret = ath10k_core_target_resume(ar); + ret = ath10k_pci_resume(ar); if (ret) - ath10k_warn("target resume failed: %d\n", ret); + ath10k_warn(ar, "failed to resume hif: %d\n", ret); return ret; } -static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops, - ath10k_pci_suspend, - ath10k_pci_resume); - -#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops) - -#else - -#define ATH10K_PCI_PM_OPS NULL - -#endif /* CONFIG_PM_SLEEP */ - -MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); +static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops, + ath10k_pci_pm_suspend, + ath10k_pci_pm_resume); static struct pci_driver ath10k_pci_driver = { .name = "ath10k_pci", .id_table = ath10k_pci_id_table, .probe = ath10k_pci_probe, .remove = ath10k_pci_remove, - .driver.pm = ATH10K_PCI_PM_OPS, +#ifdef CONFIG_PM + .driver.pm = &ath10k_pci_pm_ops, +#endif }; static int __init ath10k_pci_init(void) { - int ret; + int ret1, ret2; - ret = pci_register_driver(&ath10k_pci_driver); - if (ret) - ath10k_err("pci_register_driver failed [%d]\n", ret); + ret1 = pci_register_driver(&ath10k_pci_driver); + if (ret1) + printk(KERN_ERR "failed to register ath10k pci driver: %d\n", + ret1); - return ret; + ret2 = ath10k_ahb_init(); + if (ret2) + printk(KERN_ERR "ahb init failed: %d\n", ret2); + + if (ret1 && ret2) + return ret1; + + /* registered to at least one bus */ + return 0; } module_init(ath10k_pci_init); static void __exit ath10k_pci_exit(void) { pci_unregister_driver(&ath10k_pci_driver); + ath10k_ahb_exit(); } module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros PCIe/AHB 
802.11ac WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE); -MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE); -MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE); -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE); -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); + +/* QCA988x 2.0 firmware files */ +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); + +/* QCA9887 1.0 firmware files */ +MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); +MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); + +/* QCA6174 2.1 firmware files */ +MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); +MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); +MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE); + +/* QCA6174 3.1 firmware files */ +MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE); +MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE); +MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE); +MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); + +/* QCA9377 1.0 firmware files */ +MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE); +MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); +MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index d2a055a07dc6..4c3f536f2ea1 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -1,35 +1,19 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef _PCI_H_ #define _PCI_H_ #include <linux/interrupt.h> +#include <linux/mutex.h> #include "hw.h" #include "ce.h" - -/* FW dump area */ -#define REG_DUMP_COUNT_QCA988X 60 - -/* - * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite - */ -#define DIAG_TRANSFER_LIMIT 2048 +#include "ahb.h" /* * maximum number of bytes that can be @@ -38,27 +22,12 @@ #define DIAG_TRANSFER_LIMIT 2048 struct bmi_xfer { - struct completion done; + bool tx_done; + bool rx_done; bool wait_for_resp; u32 resp_len; }; -struct ath10k_pci_compl { - struct list_head list; - int send_or_recv; - struct ce_state *ce_state; - struct hif_ce_pipe_info *pipe_info; - void *transfer_context; - unsigned int nbytes; - unsigned int transfer_id; - unsigned int flags; -}; - -/* compl_state.send_or_recv */ -#define HIF_CE_COMPLETE_FREE 0 -#define HIF_CE_COMPLETE_SEND 1 -#define HIF_CE_COMPLETE_RECV 2 - /* * PCI-specific Target state * @@ -108,62 +77,12 @@ struct pcie_state { /* PCIE_CONFIG_FLAG definitions */ #define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001 -/* Host software's Copy Engine configuration. */ -#define CE_ATTR_FLAGS 0 - -/* - * Configuration information for a Copy Engine pipe. - * Passed from Host to Target during startup (one per CE). - * - * NOTE: Structure is shared between Host software and Target firmware! - */ -struct ce_pipe_config { - u32 pipenum; - u32 pipedir; - u32 nentries; - u32 nbytes_max; - u32 flags; - u32 reserved; -}; - -/* - * Directions for interconnect pipe configuration. - * These definitions may be used during configuration and are shared - * between Host and Target. - * - * Pipe Directions are relative to the Host, so PIPEDIR_IN means - * "coming IN over air through Target to Host" as with a WiFi Rx operation. - * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air" - * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man" - * Target since things that are "PIPEDIR_OUT" are coming IN to the Target - * over the interconnect. - */ -#define PIPEDIR_NONE 0 -#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */ -#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */ -#define PIPEDIR_INOUT 3 /* bidirectional */ - -/* Establish a mapping between a service/direction and a pipe. */ -struct service_to_pipe { - u32 service_id; - u32 pipedir; - u32 pipenum; -}; - -enum ath10k_pci_features { - ATH10K_PCI_FEATURE_MSI_X = 0, - ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1, - - /* keep last */ - ATH10K_PCI_FEATURE_COUNT -}; - /* Per-pipe state. */ -struct hif_ce_pipe_info { +struct ath10k_pci_pipe { /* Handle of underlying Copy Engine */ - struct ce_state *ce_hdl; + struct ath10k_ce_pipe *ce_hdl; - /* Our pipe number; facilitiates use of pipe_info ptrs. */ + /* Our pipe number; facilitates use of pipe_info ptrs. */ u8 pipe_num; /* Convenience back pointer to hif_ce_state. */ @@ -173,15 +92,17 @@ struct hif_ce_pipe_info { /* protects compl_free and num_send_allowed */ spinlock_t pipe_lock; +}; - /* List of free CE completion slots */ - struct list_head compl_free; - - /* Limit the number of outstanding send requests. 
*/ - int num_sends_allowed; +struct ath10k_pci_supp_chip { + u32 dev_id; + u32 rev_id; +}; - struct ath10k_pci *ar_pci; - struct tasklet_struct intr; +enum ath10k_pci_irq_mode { + ATH10K_PCI_IRQ_AUTO = 0, + ATH10K_PCI_IRQ_INTX = 1, + ATH10K_PCI_IRQ_MSI = 2, }; struct ath10k_pci { @@ -189,167 +110,151 @@ struct ath10k_pci { struct device *dev; struct ath10k *ar; void __iomem *mem; - int cacheline_sz; + size_t mem_len; - DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT); + /* Operating interrupt mode */ + enum ath10k_pci_irq_mode oper_irq_mode; - /* - * Number of MSI interrupts granted, 0 --> using legacy PCI line - * interrupts. - */ - int num_msi_intrs; + struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX]; - struct tasklet_struct intr_tq; - struct tasklet_struct msi_fw_err; + /* Copy Engine used for Diagnostic Accesses */ + struct ath10k_ce_pipe *ce_diag; + /* For protecting ce_diag */ + struct mutex ce_diag_mutex; - /* Number of Copy Engines supported */ - unsigned int ce_count; + struct work_struct dump_work; - int started; + struct ath10k_ce ce; + struct timer_list rx_post_retry; - atomic_t keep_awake_count; - bool verified_awake; + /* Due to HW quirks it is recommended to disable ASPM during device + * bootup. To do that the original PCI-E Link Control is stored before + * device bootup is executed and re-programmed later. + */ + u16 link_ctl; - /* List of CE completions to be processed */ - struct list_head compl_process; + /* Protects ps_awake and ps_wake_refcount */ + spinlock_t ps_lock; - /* protects compl_processing and compl_process */ - spinlock_t compl_lock; + /* The device has a special powersave-oriented register. When device is + * considered asleep it drains less power and driver is forbidden from + * accessing most MMIO registers. If host were to access them without + * waking up the device might scribble over host memory or return + * 0xdeadbeef readouts. + */ + unsigned long ps_wake_refcount; + + /* Waking up takes some time (up to 2ms in some cases) so it can be bad + * for latency. To mitigate this the device isn't immediately allowed + * to sleep after all references are undone - instead there's a grace + * period after which the powersave register is updated unless some + * activity to/from device happened in the meantime. + * + * Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC. + */ + struct timer_list ps_timer; - bool compl_processing; + /* MMIO registers are used to communicate with the device. With + * intensive traffic accessing powersave register would be a bit + * wasteful overhead and would needlessly stall CPU. It is far more + * efficient to rely on a variable in RAM and update it only upon + * powersave register state changes. + */ + bool ps_awake; - struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX]; + /* pci power save, disable for QCA988X and QCA99X0. + * Writing 'false' to this variable avoids frequent locking + * on MMIO read/write. 
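+	 * When pci_ps is true the accessors declared below (implemented in
+	 * pci.c) bracket each access with the ps_wake_refcount machinery
+	 * under ps_lock; writing 'false' skips that wake/sleep bookkeeping
+	 * entirely, which is the locking saving referred to above.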
+ */ + bool pci_ps; - struct ath10k_hif_cb msg_callbacks_current; + /* Chip specific pci reset routine used to do a safe reset */ + int (*pci_soft_reset)(struct ath10k *ar); - /* Target address used to signal a pending firmware event */ - u32 fw_indicator_address; + /* Chip specific pci full reset function */ + int (*pci_hard_reset)(struct ath10k *ar); - /* Copy Engine used for Diagnostic Accesses */ - struct ce_state *ce_diag; + /* chip specific methods for converting target CPU virtual address + * space to CE address space + */ + u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); - /* FIXME: document what this really protects */ - spinlock_t ce_lock; + struct ce_attr *attr; + struct ce_pipe_config *pipe_config; + struct ce_service_to_pipe *serv_to_pipe; - /* Map CE id to ce_state */ - struct ce_state *ce_id_to_state[CE_COUNT_MAX]; + /* Keep this entry in the last, memory for struct ath10k_ahb is + * allocated (ahb support enabled case) in the continuation of + * this struct. + */ + struct ath10k_ahb ahb[]; - /* makes sure that dummy reads are atomic */ - spinlock_t hw_v1_workaround_lock; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) { - return ar->hif.priv; -} - -static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr) -{ - return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr); -} - -static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val) -{ - iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr); + return (struct ath10k_pci *)ar->drv_priv; } +#define ATH10K_PCI_RX_POST_RETRY_MS 50 #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ -#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */ +#define PCIE_WAKE_TIMEOUT 30000 /* 30ms */ +#define PCIE_WAKE_LATE_US 10000 /* 10ms */ #define BAR_NUM 0 #define CDC_WAR_MAGIC_STR 0xceef0000 #define CDC_WAR_DATA_CE 4 -/* - * TODO: Should be a function call specific to each Target-type. - * This convoluted macro converts from Target CPU Virtual Address Space to CE - * Address Space. As part of this process, we conservatively fetch the current - * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space - * for this device; but that's not guaranteed. - */ -#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \ - (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \ - CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \ - 0x100000 | ((addr) & 0xfffff)) - /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */ -#define DIAG_ACCESS_CE_TIMEOUT_MS 10 - -/* - * This API allows the Host to access Target registers directly - * and relatively efficiently over PCIe. - * This allows the Host to avoid extra overhead associated with - * sending a message to firmware and waiting for a response message - * from firmware, as is done on other interconnects. - * - * Yet there is some complexity with direct accesses because the - * Target's power state is not known a priori. The Host must issue - * special PCIe reads/writes in order to explicitly wake the Target - * and to verify that it is awake and will remain awake. - * - * Usage: - * - * Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space. - * These calls must be bracketed by ath10k_pci_wake and - * ath10k_pci_sleep. A single BEGIN/END pair is adequate for - * multiple READ/WRITE operations. - * - * Use ath10k_pci_wake to put the Target in a state in - * which it is legal for the Host to directly access it. This - * may involve waking the Target from a low power state, which - * may take up to 2Ms! 
- * - * Use ath10k_pci_sleep to tell the Target that as far as - * this code path is concerned, it no longer needs to remain - * directly accessible. BEGIN/END is under a reference counter; - * multiple code paths may issue BEGIN/END on a single targid. +#define DIAG_ACCESS_CE_TIMEOUT_US 10000 /* 10 ms */ +#define DIAG_ACCESS_CE_WAIT_US 50 + +void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value); +void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val); +void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val); + +u32 ath10k_pci_read32(struct ath10k *ar, u32 offset); +u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr); +u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr); + +int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items); +int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len); +int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + const void *data, int nbytes); +int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *req, u32 req_len, + void *resp, u32 *resp_len); +int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); +void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe, + u8 *dl_pipe); +void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, + int force); +u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe); +void ath10k_pci_hif_power_down(struct ath10k *ar); +int ath10k_pci_alloc_pipes(struct ath10k *ar); +void ath10k_pci_free_pipes(struct ath10k *ar); +void ath10k_pci_rx_replenish_retry(struct timer_list *t); +void ath10k_pci_ce_deinit(struct ath10k *ar); +void ath10k_pci_init_napi(struct ath10k *ar); +int ath10k_pci_init_pipes(struct ath10k *ar); +int ath10k_pci_init_config(struct ath10k *ar); +void ath10k_pci_rx_post(struct ath10k *ar); +void ath10k_pci_flush(struct ath10k *ar); +void ath10k_pci_enable_intx_irq(struct ath10k *ar); +bool ath10k_pci_irq_pending(struct ath10k *ar); +void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar); +void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar); +int ath10k_pci_wait_for_target_init(struct ath10k *ar); +int ath10k_pci_setup_resource(struct ath10k *ar); +void ath10k_pci_release_resource(struct ath10k *ar); + +/* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too + * frequently. To avoid this put SoC to sleep after a very conservative grace + * period. Adjust with great care. 
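+ *
+ * Concretely: dropping the last wake reference does not put the SoC to
+ * sleep immediately; it (re)arms ps_timer, and the powersave register is
+ * only updated once this grace period expires with no further activity
+ * to/from the device. See the ps_wake_refcount/ps_timer comments in
+ * struct ath10k_pci above.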
*/ -static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, - u32 value) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - void __iomem *addr = ar_pci->mem; - - if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) { - unsigned long irq_flags; - - spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags); - - ioread32(addr+offset+4); /* 3rd read prior to write */ - ioread32(addr+offset+4); /* 2nd read prior to write */ - ioread32(addr+offset+4); /* 1st read prior to write */ - iowrite32(value, addr+offset); - - spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock, - irq_flags); - } else { - iowrite32(value, addr+offset); - } -} - -static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - return ioread32(ar_pci->mem + offset); -} - -extern unsigned int ath10k_target_ps; - -void ath10k_do_pci_wake(struct ath10k *ar); -void ath10k_do_pci_sleep(struct ath10k *ar); - -static inline void ath10k_pci_wake(struct ath10k *ar) -{ - if (ath10k_target_ps) - ath10k_do_pci_wake(ar); -} - -static inline void ath10k_pci_sleep(struct ath10k *ar) -{ - if (ath10k_target_ps) - ath10k_do_pci_sleep(ar); -} +#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60 #endif /* _PCI_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c new file mode 100644 index 000000000000..8275345631a0 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -0,0 +1,1140 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/idr.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/module.h> +#include <linux/net.h> +#include <linux/platform_device.h> +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/soc/qcom/smem.h> +#include <linux/string.h> +#include <net/sock.h> + +#include "debug.h" +#include "snoc.h" + +#define ATH10K_QMI_CLIENT_ID 0x4b4e454c +#define ATH10K_QMI_TIMEOUT 30 +#define SMEM_IMAGE_VERSION_TABLE 469 +#define SMEM_IMAGE_TABLE_CNSS_INDEX 13 +#define SMEM_IMAGE_VERSION_ENTRY_SIZE 128 +#define SMEM_IMAGE_VERSION_NAME_SIZE 75 + +static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi, + struct ath10k_msa_mem_info *mem_info) +{ + struct qcom_scm_vmperm dst_perms[3]; + struct ath10k *ar = qmi->ar; + u64 src_perms; + u32 perm_count; + int ret; + + src_perms = BIT(QCOM_SCM_VMID_HLOS); + + dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA; + dst_perms[0].perm = QCOM_SCM_PERM_RW; + dst_perms[1].vmid = QCOM_SCM_VMID_WLAN; + dst_perms[1].perm = QCOM_SCM_PERM_RW; + + if (mem_info->secure) { + perm_count = 2; + } else { + dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE; + dst_perms[2].perm = QCOM_SCM_PERM_RW; + perm_count = 3; + } + + ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size, + &src_perms, dst_perms, perm_count); + if (ret < 0) + ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret); + + return ret; +} + +static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi, + struct ath10k_msa_mem_info *mem_info) +{ + struct qcom_scm_vmperm dst_perms; + struct ath10k *ar = qmi->ar; + u64 src_perms; + int ret; + + src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN); + + if (!mem_info->secure) + src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE); + + dst_perms.vmid = 
QCOM_SCM_VMID_HLOS; + dst_perms.perm = QCOM_SCM_PERM_RW; + + ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size, + &src_perms, &dst_perms, 1); + if (ret < 0) + ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret); + + return ret; +} + +static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi) +{ + int ret; + int i; + + if (qmi->msa_fixed_perm) + return 0; + + for (i = 0; i < qmi->nr_mem_region; i++) { + ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]); + if (ret) + goto err_unmap; + } + + return 0; + +err_unmap: + for (i--; i >= 0; i--) + ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]); + return ret; +} + +static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi) +{ + int i; + + if (qmi->msa_fixed_perm) + return; + + for (i = 0; i < qmi->nr_mem_region; i++) + ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]); +} + +static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi) +{ + struct wlfw_msa_info_resp_msg_v01 resp = {}; + struct wlfw_msa_info_req_msg_v01 req = {}; + struct ath10k *ar = qmi->ar; + phys_addr_t max_mapped_addr; + struct qmi_txn txn; + int ret; + int i; + + req.msa_addr = ar->msa.paddr; + req.size = ar->msa.mem_size; + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, + wlfw_msa_info_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_MSA_INFO_REQ_V01, + WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_msa_info_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath10k_err(ar, "failed to send msa mem info req: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error); + ret = -EINVAL; + goto out; + } + + if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) { + ath10k_err(ar, "invalid memory region length received: %d\n", + resp.mem_region_info_len); + ret = -EINVAL; + goto out; + } + + max_mapped_addr = ar->msa.paddr + ar->msa.mem_size; + qmi->nr_mem_region = resp.mem_region_info_len; + for (i = 0; i < resp.mem_region_info_len; i++) { + if (resp.mem_region_info[i].size > ar->msa.mem_size || + resp.mem_region_info[i].region_addr > max_mapped_addr || + resp.mem_region_info[i].region_addr < ar->msa.paddr || + resp.mem_region_info[i].size + + resp.mem_region_info[i].region_addr > max_mapped_addr) { + ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n", + resp.mem_region_info[i].region_addr, + resp.mem_region_info[i].size); + ret = -EINVAL; + goto fail_unwind; + } + qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr; + qmi->mem_region[i].size = resp.mem_region_info[i].size; + qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag; + ath10k_dbg(ar, ATH10K_DBG_QMI, + "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n", + i, &qmi->mem_region[i].addr, + qmi->mem_region[i].size, + qmi->mem_region[i].secure); + } + + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n"); + return 0; + +fail_unwind: + memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i); +out: + return ret; +} + +static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi) +{ + struct wlfw_msa_ready_resp_msg_v01 resp = {}; + struct wlfw_msa_ready_req_msg_v01 req = {}; + struct ath10k *ar = qmi->ar; + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, + 
wlfw_msa_ready_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_MSA_READY_REQ_V01, + WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_msa_ready_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error); + ret = -EINVAL; + } + + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n"); + return 0; + +out: + return ret; +} + +static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi) +{ + struct wlfw_bdf_download_resp_msg_v01 resp = {}; + struct wlfw_bdf_download_req_msg_v01 *req; + struct ath10k *ar = qmi->ar; + unsigned int remaining; + struct qmi_txn txn; + const u8 *temp; + int ret; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + temp = ar->normal_mode_fw.board_data; + remaining = ar->normal_mode_fw.board_len; + + while (remaining) { + req->valid = 1; + req->file_id_valid = 1; + req->file_id = 0; + req->total_size_valid = 1; + req->total_size = ar->normal_mode_fw.board_len; + req->seg_id_valid = 1; + req->data_valid = 1; + req->end_valid = 1; + + if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) { + req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01; + } else { + req->data_len = remaining; + req->end = 1; + } + + memcpy(req->data, temp, req->data_len); + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, + wlfw_bdf_download_resp_msg_v01_ei, + &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_BDF_DOWNLOAD_REQ_V01, + WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_bdf_download_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + + if (ret < 0) + goto out; + + /* end = 1 triggers a CRC check on the BDF. If this fails, we + * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still + * willing to use the BDF. For some platforms, all the valid + * released BDFs fail this CRC check, so attempt to detect this + * scenario and treat it as non-fatal. 
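+		 *
+		 * (Context: each pass of this loop ships at most
+		 * QMI_WLFW_MAX_DATA_SIZE_V01 bytes, advances seg_id and sets
+		 * end only on the final segment, so the firmware can
+		 * reassemble the board file and run the CRC check described
+		 * here.)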
+ */ + if (resp.resp.result != QMI_RESULT_SUCCESS_V01 && + !(req->end == 1 && + resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) { + ath10k_err(ar, "failed to download board data file: %d\n", + resp.resp.error); + ret = -EINVAL; + goto out; + } + + remaining -= req->data_len; + temp += req->data_len; + req->seg_id++; + } + + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n"); + + kfree(req); + return 0; + +out: + kfree(req); + return ret; +} + +static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi) +{ + struct wlfw_cal_report_resp_msg_v01 resp = {}; + struct wlfw_cal_report_req_msg_v01 req = {}; + struct ath10k *ar = qmi->ar; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct qmi_txn txn; + int i, j = 0; + int ret; + + if (ar_snoc->xo_cal_supported) { + req.xo_cal_data_valid = 1; + req.xo_cal_data = ar_snoc->xo_cal_data; + } + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei, + &resp); + if (ret < 0) + goto out; + + for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) { + if (qmi->cal_data[i].total_size && + qmi->cal_data[i].data) { + req.meta_data[j] = qmi->cal_data[i].cal_id; + j++; + } + } + req.meta_data_len = j; + + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_CAL_REPORT_REQ_V01, + WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_cal_report_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath10k_err(ar, "failed to send calibration request: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error); + ret = -EINVAL; + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n"); + return 0; + +out: + return ret; +} + +static int +ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_qmi *qmi = ar_snoc->qmi; + struct wlfw_wlan_mode_resp_msg_v01 resp = {}; + struct wlfw_wlan_mode_req_msg_v01 req = {}; + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, + wlfw_wlan_mode_resp_msg_v01_ei, + &resp); + if (ret < 0) + goto out; + + req.mode = mode; + req.hw_debug_valid = 1; + req.hw_debug = 0; + + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_WLAN_MODE_REQ_V01, + WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_wlan_mode_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "more request rejected: %d\n", resp.resp.error); + ret = -EINVAL; + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode); + return 0; + +out: + return ret; +} + +static int +ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar, + struct ath10k_qmi_wlan_enable_cfg *config, + const char *version) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_qmi *qmi = ar_snoc->qmi; + struct wlfw_wlan_cfg_resp_msg_v01 resp = {}; + struct wlfw_wlan_cfg_req_msg_v01 *req; + struct qmi_txn txn; + int ret; + u32 i; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, + wlfw_wlan_cfg_resp_msg_v01_ei, + &resp); + if (ret < 0) + goto out; + + 
req->host_version_valid = 0; + + req->tgt_cfg_valid = 1; + if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01) + req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01; + else + req->tgt_cfg_len = config->num_ce_tgt_cfg; + for (i = 0; i < req->tgt_cfg_len; i++) { + req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num; + req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir; + req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries; + req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max; + req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags; + } + + req->svc_cfg_valid = 1; + if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01) + req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01; + else + req->svc_cfg_len = config->num_ce_svc_pipe_cfg; + for (i = 0; i < req->svc_cfg_len; i++) { + req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id; + req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir; + req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num; + } + + req->shadow_reg_valid = 1; + if (config->num_shadow_reg_cfg > + QMI_WLFW_MAX_NUM_SHADOW_REG_V01) + req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01; + else + req->shadow_reg_len = config->num_shadow_reg_cfg; + + memcpy(req->shadow_reg, config->shadow_reg_cfg, + sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len); + + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_WLAN_CFG_REQ_V01, + WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_wlan_cfg_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath10k_err(ar, "failed to send config request: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "config request rejected: %d\n", resp.resp.error); + ret = -EINVAL; + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n"); + kfree(req); + return 0; + +out: + kfree(req); + return ret; +} + +int ath10k_qmi_wlan_enable(struct ath10k *ar, + struct ath10k_qmi_wlan_enable_cfg *config, + enum wlfw_driver_mode_enum_v01 mode, + const char *version) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n", + mode, config); + + ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version); + if (ret) { + ath10k_err(ar, "failed to send qmi config: %d\n", ret); + return ret; + } + + ret = ath10k_qmi_mode_send_sync_msg(ar, mode); + if (ret) { + ath10k_err(ar, "failed to send qmi mode: %d\n", ret); + return ret; + } + + return 0; +} + +int ath10k_qmi_wlan_disable(struct ath10k *ar) +{ + return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01); +} + +static void ath10k_qmi_add_wlan_ver_smem(struct ath10k *ar, const char *fw_build_id) +{ + u8 *table_ptr; + size_t smem_item_size; + const u32 smem_img_idx_wlan = SMEM_IMAGE_TABLE_CNSS_INDEX * + SMEM_IMAGE_VERSION_ENTRY_SIZE; + + table_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, + SMEM_IMAGE_VERSION_TABLE, + &smem_item_size); + + if (IS_ERR(table_ptr)) { + ath10k_err(ar, "smem image version table not found\n"); + return; + } + + if (smem_img_idx_wlan + SMEM_IMAGE_VERSION_ENTRY_SIZE > + smem_item_size) { + ath10k_err(ar, "smem block size too small: %zu\n", + smem_item_size); + return; + } + + strscpy(table_ptr + smem_img_idx_wlan, fw_build_id, + SMEM_IMAGE_VERSION_NAME_SIZE); +} + +static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi) +{ + struct wlfw_cap_resp_msg_v01 *resp; + struct wlfw_cap_req_msg_v01 req = {}; + struct ath10k *ar = qmi->ar; + struct ath10k_snoc *ar_snoc = 
ath10k_snoc_priv(ar); + struct qmi_txn txn; + int ret; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return -ENOMEM; + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_CAP_REQ_V01, + WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_cap_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath10k_err(ar, "failed to send capability request: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + if (ret < 0) + goto out; + + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error); + ret = -EINVAL; + goto out; + } + + if (resp->chip_info_valid) { + qmi->chip_info.chip_id = resp->chip_info.chip_id; + qmi->chip_info.chip_family = resp->chip_info.chip_family; + } else { + qmi->chip_info.chip_id = 0xFF; + } + + if (resp->board_info_valid) + qmi->board_info.board_id = resp->board_info.board_id; + else + qmi->board_info.board_id = 0xFF; + + if (resp->soc_info_valid) + qmi->soc_info.soc_id = resp->soc_info.soc_id; + + if (resp->fw_version_info_valid) { + qmi->fw_version = resp->fw_version_info.fw_version; + strscpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp, + sizeof(qmi->fw_build_timestamp)); + } + + if (resp->fw_build_id_valid) + strscpy(qmi->fw_build_id, resp->fw_build_id, + MAX_BUILD_ID_LEN + 1); + + if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) { + ath10k_info(ar, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x", + qmi->chip_info.chip_id, qmi->chip_info.chip_family, + qmi->board_info.board_id, qmi->soc_info.soc_id); + ath10k_info(ar, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s", + qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id); + } + + if (resp->fw_build_id_valid) + ath10k_qmi_add_wlan_ver_smem(ar, qmi->fw_build_id); + + kfree(resp); + return 0; + +out: + kfree(resp); + return ret; +} + +static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi) +{ + struct wlfw_host_cap_resp_msg_v01 resp = {}; + struct wlfw_host_cap_req_msg_v01 req = {}; + const struct qmi_elem_info *req_ei; + struct ath10k *ar = qmi->ar; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct qmi_txn txn; + int ret; + + req.daemon_support_valid = 1; + req.daemon_support = 0; + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei, + &resp); + if (ret < 0) + goto out; + + if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags)) + req_ei = wlfw_host_cap_8bit_req_msg_v01_ei; + else + req_ei = wlfw_host_cap_req_msg_v01_ei; + + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_HOST_CAP_REQ_V01, + WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN, + req_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath10k_err(ar, "failed to send host capability request: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + if (ret < 0) + goto out; + + /* older FW didn't support this request, which is not fatal */ + if (resp.resp.result != QMI_RESULT_SUCCESS_V01 && + resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) { + ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error); + ret = -EINVAL; + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n"); + return 0; + +out: + return ret; +} + +int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + 
struct wlfw_ini_resp_msg_v01 resp = {}; + struct ath10k_qmi *qmi = ar_snoc->qmi; + struct wlfw_ini_req_msg_v01 req = {}; + struct qmi_txn txn; + int ret; + + req.enablefwlog_valid = 1; + req.enablefwlog = fw_log_mode; + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_ini_resp_msg_v01_ei, + &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_INI_REQ_V01, + WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_ini_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath10k_err(ar, "failed to send fw log request: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "fw log request rejected: %d\n", + resp.resp.error); + ret = -EINVAL; + goto out; + } + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi fw log request completed, mode: %d\n", + fw_log_mode); + return 0; + +out: + return ret; +} + +static int +ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi) +{ + struct wlfw_ind_register_resp_msg_v01 resp = {}; + struct wlfw_ind_register_req_msg_v01 req = {}; + struct ath10k *ar = qmi->ar; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct qmi_txn txn; + int ret; + + req.client_id_valid = 1; + req.client_id = ATH10K_QMI_CLIENT_ID; + req.fw_ready_enable_valid = 1; + req.fw_ready_enable = 1; + req.msa_ready_enable_valid = 1; + req.msa_ready_enable = 1; + + if (ar_snoc->xo_cal_supported) { + req.xo_cal_enable_valid = 1; + req.xo_cal_enable = 1; + } + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, + wlfw_ind_register_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_IND_REGISTER_REQ_V01, + WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_ind_register_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath10k_err(ar, "failed to send indication registered request: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error); + ret = -EINVAL; + goto out; + } + + if (resp.fw_status_valid) { + if (resp.fw_status & QMI_WLFW_FW_READY_V01) + qmi->fw_ready = true; + } + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n"); + return 0; + +out: + return ret; +} + +static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi) +{ + struct ath10k *ar = qmi->ar; + int ret; + + ret = ath10k_qmi_ind_register_send_sync_msg(qmi); + if (ret) + return; + + if (qmi->fw_ready) { + ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND); + return; + } + + ret = ath10k_qmi_host_cap_send_sync(qmi); + if (ret) + return; + + ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi); + if (ret) + return; + + /* + * HACK: sleep for a while between receiving the msa info response + * and the XPU update to prevent SDM845 from crashing due to a security + * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1. 
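+	 *
+	 * (For context, the bring-up sequence in this handler is:
+	 * ind_register -> host_cap -> msa_mem_info -> this delay -> MSA
+	 * permission setup -> msa_ready -> cap exchange, with the permission
+	 * setup unwound again if a later step fails.)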
+ */ + msleep(20); + + ret = ath10k_qmi_setup_msa_permissions(qmi); + if (ret) + return; + + ret = ath10k_qmi_msa_ready_send_sync_msg(qmi); + if (ret) + goto err_setup_msa; + + ret = ath10k_qmi_cap_send_sync_msg(qmi); + if (ret) + goto err_setup_msa; + + return; + +err_setup_msa: + ath10k_qmi_remove_msa_permission(qmi); +} + +static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi) +{ + struct ath10k *ar = qmi->ar; + int ret; + + ar->hif.bus = ATH10K_BUS_SNOC; + ar->id.qmi_ids_valid = true; + ar->id.qmi_board_id = qmi->board_info.board_id; + ar->id.qmi_chip_id = qmi->chip_info.chip_id; + ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR; + + ret = ath10k_core_check_dt(ar); + if (ret) + ath10k_dbg(ar, ATH10K_DBG_QMI, "DT bdf variant name not set.\n"); + + return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD); +} + +static int +ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi, + enum ath10k_qmi_driver_event_type type, + void *data) +{ + struct ath10k_qmi_driver_event *event; + + event = kzalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return -ENOMEM; + + event->type = type; + event->data = data; + + spin_lock(&qmi->event_lock); + list_add_tail(&event->list, &qmi->event_list); + spin_unlock(&qmi->event_lock); + + queue_work(qmi->event_wq, &qmi->event_work); + + return 0; +} + +static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi) +{ + struct ath10k *ar = qmi->ar; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_qmi_remove_msa_permission(qmi); + ath10k_core_free_board_files(ar); + if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags) && + !test_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags)) + ath10k_snoc_fw_crashed_dump(ar); + + ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND); + ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n"); +} + +static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi) +{ + int ret; + + ret = ath10k_qmi_fetch_board_file(qmi); + if (ret) + goto out; + + ret = ath10k_qmi_bdf_dnld_send_sync(qmi); + if (ret) + goto out; + + ret = ath10k_qmi_send_cal_report_req(qmi); + +out: + return; +} + +static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi) +{ + struct ath10k *ar = qmi->ar; + + ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n"); + ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND); + + return 0; +} + +static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *data) +{ + struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl); + + ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL); +} + +static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *data) +{ + struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl); + + ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL); +} + +static const struct qmi_msg_handler qmi_msg_handler[] = { + { + .type = QMI_INDICATION, + .msg_id = QMI_WLFW_FW_READY_IND_V01, + .ei = wlfw_fw_ready_ind_msg_v01_ei, + .decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01), + .fn = ath10k_qmi_fw_ready_ind, + }, + { + .type = QMI_INDICATION, + .msg_id = QMI_WLFW_MSA_READY_IND_V01, + .ei = wlfw_msa_ready_ind_msg_v01_ei, + .decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01), + .fn = ath10k_qmi_msa_ready_ind, + }, + {} +}; + +static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl, + struct qmi_service 
*service) +{ + struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl); + struct sockaddr_qrtr *sq = &qmi->sq; + struct ath10k *ar = qmi->ar; + int ret; + + sq->sq_family = AF_QIPCRTR; + sq->sq_node = service->node; + sq->sq_port = service->port; + + ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n"); + + ret = kernel_connect(qmi_hdl->sock, (struct sockaddr_unsized *)&qmi->sq, + sizeof(qmi->sq), 0); + if (ret) { + ath10k_err(ar, "failed to connect to a remote QMI service port\n"); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n"); + ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL); + + return ret; +} + +static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl, + struct qmi_service *service) +{ + struct ath10k_qmi *qmi = + container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl); + + qmi->fw_ready = false; + + /* + * The del_server event is to be processed only if coming from + * the qmi server. The qmi infrastructure sends del_server, when + * any client releases the qmi handle. In this case do not process + * this del_server event. + */ + if (qmi->state == ATH10K_QMI_STATE_INIT_DONE) + ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, + NULL); +} + +static const struct qmi_ops ath10k_qmi_ops = { + .new_server = ath10k_qmi_new_server, + .del_server = ath10k_qmi_del_server, +}; + +static void ath10k_qmi_driver_event_work(struct work_struct *work) +{ + struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi, + event_work); + struct ath10k_qmi_driver_event *event; + struct ath10k *ar = qmi->ar; + + spin_lock(&qmi->event_lock); + while (!list_empty(&qmi->event_list)) { + event = list_first_entry(&qmi->event_list, + struct ath10k_qmi_driver_event, list); + list_del(&event->list); + spin_unlock(&qmi->event_lock); + + switch (event->type) { + case ATH10K_QMI_EVENT_SERVER_ARRIVE: + ath10k_qmi_event_server_arrive(qmi); + if (qmi->no_msa_ready_indicator) { + ath10k_info(ar, "qmi not waiting for msa_ready indicator"); + ath10k_qmi_event_msa_ready(qmi); + } + break; + case ATH10K_QMI_EVENT_SERVER_EXIT: + ath10k_qmi_event_server_exit(qmi); + break; + case ATH10K_QMI_EVENT_FW_READY_IND: + ath10k_qmi_event_fw_ready_ind(qmi); + break; + case ATH10K_QMI_EVENT_MSA_READY_IND: + if (qmi->no_msa_ready_indicator) { + ath10k_warn(ar, "qmi unexpected msa_ready indicator"); + break; + } + ath10k_qmi_event_msa_ready(qmi); + break; + default: + ath10k_warn(ar, "invalid event type: %d", event->type); + break; + } + kfree(event); + spin_lock(&qmi->event_lock); + } + spin_unlock(&qmi->event_lock); +} + +int ath10k_qmi_init(struct ath10k *ar, u32 msa_size) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct device *dev = ar->dev; + struct ath10k_qmi *qmi; + int ret; + + qmi = kzalloc(sizeof(*qmi), GFP_KERNEL); + if (!qmi) + return -ENOMEM; + + qmi->ar = ar; + ar_snoc->qmi = qmi; + + if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm")) + qmi->msa_fixed_perm = true; + + if (of_property_read_bool(dev->of_node, "qcom,no-msa-ready-indicator")) + qmi->no_msa_ready_indicator = true; + + ret = qmi_handle_init(&qmi->qmi_hdl, + WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN, + &ath10k_qmi_ops, qmi_msg_handler); + if (ret) + goto err; + + qmi->event_wq = alloc_ordered_workqueue("ath10k_qmi_driver_event", 0); + if (!qmi->event_wq) { + ath10k_err(ar, "failed to allocate workqueue\n"); + ret = -EFAULT; + goto err_release_qmi_handle; + } + + INIT_LIST_HEAD(&qmi->event_list); + 
spin_lock_init(&qmi->event_lock); + INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work); + + ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, 0); + if (ret) + goto err_qmi_lookup; + + qmi->state = ATH10K_QMI_STATE_INIT_DONE; + return 0; + +err_qmi_lookup: + destroy_workqueue(qmi->event_wq); + +err_release_qmi_handle: + qmi_handle_release(&qmi->qmi_hdl); + +err: + kfree(qmi); + return ret; +} + +int ath10k_qmi_deinit(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_qmi *qmi = ar_snoc->qmi; + + qmi->state = ATH10K_QMI_STATE_DEINIT; + qmi_handle_release(&qmi->qmi_hdl); + cancel_work_sync(&qmi->event_work); + destroy_workqueue(qmi->event_wq); + kfree(qmi); + ar_snoc->qmi = NULL; + + return 0; +} diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h new file mode 100644 index 000000000000..0816eb4e4a18 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/qmi.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + */ +#ifndef _ATH10K_QMI_H_ +#define _ATH10K_QMI_H_ + +#include <linux/soc/qcom/qmi.h> +#include <linux/qrtr.h> +#include "qmi_wlfw_v01.h" + +#define MAX_NUM_MEMORY_REGIONS 2 +#define MAX_TIMESTAMP_LEN 32 +#define MAX_BUILD_ID_LEN 128 +#define MAX_NUM_CAL_V01 5 + +enum ath10k_qmi_driver_event_type { + ATH10K_QMI_EVENT_SERVER_ARRIVE, + ATH10K_QMI_EVENT_SERVER_EXIT, + ATH10K_QMI_EVENT_FW_READY_IND, + ATH10K_QMI_EVENT_FW_DOWN_IND, + ATH10K_QMI_EVENT_MSA_READY_IND, + ATH10K_QMI_EVENT_MAX, +}; + +struct ath10k_msa_mem_info { + phys_addr_t addr; + u32 size; + bool secure; +}; + +struct ath10k_qmi_chip_info { + u32 chip_id; + u32 chip_family; +}; + +struct ath10k_qmi_board_info { + u32 board_id; +}; + +struct ath10k_qmi_soc_info { + u32 soc_id; +}; + +struct ath10k_qmi_cal_data { + u32 cal_id; + u32 total_size; + u8 *data; +}; + +struct ath10k_tgt_pipe_cfg { + __le32 pipe_num; + __le32 pipe_dir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; + __le32 reserved; +}; + +struct ath10k_svc_pipe_cfg { + __le32 service_id; + __le32 pipe_dir; + __le32 pipe_num; +}; + +struct ath10k_shadow_reg_cfg { + __le16 ce_id; + __le16 reg_offset; +}; + +struct ath10k_qmi_wlan_enable_cfg { + u32 num_ce_tgt_cfg; + struct ath10k_tgt_pipe_cfg *ce_tgt_cfg; + u32 num_ce_svc_pipe_cfg; + struct ath10k_svc_pipe_cfg *ce_svc_cfg; + u32 num_shadow_reg_cfg; + struct ath10k_shadow_reg_cfg *shadow_reg_cfg; +}; + +struct ath10k_qmi_driver_event { + struct list_head list; + enum ath10k_qmi_driver_event_type type; + void *data; +}; + +enum ath10k_qmi_state { + ATH10K_QMI_STATE_INIT_DONE, + ATH10K_QMI_STATE_DEINIT, +}; + +struct ath10k_qmi { + struct ath10k *ar; + struct qmi_handle qmi_hdl; + struct sockaddr_qrtr sq; + struct work_struct event_work; + struct workqueue_struct *event_wq; + struct list_head event_list; + spinlock_t event_lock; /* spinlock for qmi event list */ + u32 nr_mem_region; + struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS]; + struct ath10k_qmi_chip_info chip_info; + struct ath10k_qmi_board_info board_info; + struct ath10k_qmi_soc_info soc_info; + char fw_build_id[MAX_BUILD_ID_LEN + 1]; + u32 fw_version; + bool fw_ready; + char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1]; + struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01]; + bool msa_fixed_perm; + bool no_msa_ready_indicator; + enum ath10k_qmi_state state; +}; + +int ath10k_qmi_wlan_enable(struct ath10k *ar, + struct ath10k_qmi_wlan_enable_cfg 
*config, + enum wlfw_driver_mode_enum_v01 mode, + const char *version); +int ath10k_qmi_wlan_disable(struct ath10k *ar); +int ath10k_qmi_init(struct ath10k *ar, u32 msa_size); +int ath10k_qmi_deinit(struct ath10k *ar); +int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode); + +#endif /* ATH10K_QMI_H */ diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c new file mode 100644 index 000000000000..0e85c75d2278 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c @@ -0,0 +1,2309 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/soc/qcom/qmi.h> +#include <linux/types.h> +#include "qmi_wlfw_v01.h" + +static const struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + pipe_num), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_pipedir_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + pipe_dir), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + nentries), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + nbytes_max), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + flags), + }, + {} +}; + +static const struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, + service_id), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_pipedir_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, + pipe_dir), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, + pipe_num), + }, + {} +}; + +static const struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01, + id), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01, + offset), + }, + {} +}; + +static const struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01, + addr), + }, + {} +}; + +static const struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + 
.elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_memory_region_info_s_v01, + region_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_memory_region_info_s_v01, + size), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_memory_region_info_s_v01, + secure_flag), + }, + {} +}; + +static const struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_cfg_s_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_cfg_s_v01, + size), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_cfg_s_v01, + secure_flag), + }, + {} +}; + +static const struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + size), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_mem_type_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + type), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + mem_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01, + .elem_size = sizeof(struct wlfw_mem_cfg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + mem_cfg), + .ei_array = wlfw_mem_cfg_s_v01_ei, + }, + {} +}; + +static const struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + size), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_mem_type_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + type), + }, + {} +}; + +static const struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_rf_chip_info_s_v01, + chip_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_rf_chip_info_s_v01, + chip_family), + }, + {} +}; + +static const struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + 
.offset = offsetof(struct wlfw_rf_board_info_s_v01, + board_id), + }, + {} +}; + +static const struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_soc_info_s_v01, + soc_id), + }, + {} +}; + +static const struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_fw_version_info_s_v01, + fw_version), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_fw_version_info_s_v01, + fw_build_timestamp), + }, + {} +}; + +const struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + fw_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + fw_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_download_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_download_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_update_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_update_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + msa_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + msa_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + pin_connect_result_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + pin_connect_result_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + client_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + client_id), 
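+		/* client_id is encoded only when the QMI_OPT_FLAG element
+		 * above (same tlv_type, 0x15) finds the corresponding *_valid
+		 * member set; every optional TLV in these tables follows that
+		 * flag-then-value pairing.
+		 */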
+ }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + request_mem_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + request_mem_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + mem_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + mem_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + fw_init_done_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + fw_init_done_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + rejuvenate_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + rejuvenate_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + xo_cal_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + xo_cal_enable), + }, + {} +}; + +const struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_ind_register_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ind_register_resp_msg_v01, + fw_status_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ind_register_resp_msg_v01, + fw_status), + }, + {} +}; + +const struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = { + {} +}; + +const struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = { + {} +}; + +const struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, + pwr_pin_result_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct 
wlfw_pin_connect_result_ind_msg_v01, + pwr_pin_result), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, + phy_io_pin_result_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, + phy_io_pin_result), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, + rf_pin_result_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, + rf_pin_result), + }, + {} +}; + +const struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_driver_mode_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, + mode), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, + hw_debug_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, + hw_debug), + }, + {} +}; + +const struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + host_version_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + host_version), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + tgt_cfg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + tgt_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_CE_V01, + .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + tgt_cfg), + .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + svc_cfg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct 
wlfw_wlan_cfg_req_msg_v01, + svc_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_SVC_V01, + .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + svc_cfg), + .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01, + .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg), + .ei_array = wlfw_shadow_reg_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_v2_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_v2_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_SHADOW_REG_V2, + .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_v2), + .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = { + {} +}; + +const struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + chip_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + chip_info), + .ei_array = wlfw_rf_chip_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + board_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_rf_board_info_s_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + board_info), + .ei_array = wlfw_rf_board_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + 
.offset = offsetof(struct wlfw_cap_resp_msg_v01, + soc_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_soc_info_s_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + soc_info), + .ei_array = wlfw_soc_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_version_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_fw_version_info_s_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_version_info), + .ei_array = wlfw_fw_version_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_build_id_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_build_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + num_macs_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + num_macs), + }, + {} +}; + +const struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + valid), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + file_id_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + file_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct 
wlfw_bdf_download_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + end), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + bdf_type_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + bdf_type), + }, + {} +}; + +const struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_bdf_download_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, + meta_data_len), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = QMI_WLFW_MAX_NUM_CAL_V01, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, + meta_data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, + xo_cal_data_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, + xo_cal_data), + }, + {} +}; + +const struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cal_report_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_initiate_cal_download_ind_msg_v01, + cal_id), + }, + {} +}; + +const struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + valid), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + file_id_valid), + }, + 
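+	/*
+	 * Note the convention used throughout these element tables: each
+	 * optional TLV is described by a QMI_OPT_FLAG "*_valid" element
+	 * immediately followed by its payload element under the same
+	 * tlv_type, and each variable-length array pairs a QMI_DATA_LEN
+	 * element count with a VAR_LEN_ARRAY payload.
+	 */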
{ + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + file_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + end), + }, + {} +}; + +const struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cal_download_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01, + cal_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01, + total_size), + }, + {} +}; + +const struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_update_req_msg_v01, + cal_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cal_update_req_msg_v01, + seg_id), + }, + {} +}; + +const struct qmi_elem_info 
wlfw_cal_update_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + file_id_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + file_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + end), + }, + {} +}; + +const struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_msa_info_req_msg_v01, + msa_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_msa_info_req_msg_v01, + size), + }, + {} +}; + +const struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = 
offsetof(struct wlfw_msa_info_resp_msg_v01, + mem_region_info_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_MEM_REG_V01, + .elem_size = sizeof(struct wlfw_memory_region_info_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, + mem_region_info), + .ei_array = wlfw_memory_region_info_s_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = { + {} +}; + +const struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ini_req_msg_v01, + enablefwlog_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ini_req_msg_v01, + enablefwlog), + }, + {} +}; + +const struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_ini_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, + mem_type), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, + data_len), + }, + {} +}; + +const struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, + data), + }, + {} +}; + +const struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, + offset), 
+ }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, + mem_type), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, + data), + }, + {} +}; + +const struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_athdiag_write_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_vbatt_req_msg_v01, + voltage_uv), + }, + {} +}; + +const struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_vbatt_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_mac_addr_req_msg_v01, + mac_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01, + .elem_size = sizeof(u8), + .array_type = STATIC_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_mac_addr_req_msg_v01, + mac_addr), + }, + {} +}; + +const struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + daemon_support_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + daemon_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + wake_msi_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + wake_msi), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios_valid), + }, + { + .data_type 
= QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x12,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   gpios_len),
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+		.elem_size = sizeof(u32),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 0x12,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   gpios),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x13,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   nm_modem_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x13,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   nm_modem),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x14,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   bdf_support_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x14,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   bdf_support),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x15,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   bdf_cache_support_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x15,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   bdf_cache_support),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x16,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   m3_support_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x16,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   m3_support),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x17,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   m3_cache_support_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x17,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   m3_cache_support),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x18,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   cal_filesys_support_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x18,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   cal_filesys_support),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x19,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   cal_cache_support_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x19,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   cal_cache_support),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x1A,
+		.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+				   cal_done_valid),
+	},
+	{
+		.data_type = 
QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_done), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_bucket_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_bucket), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_cfg_mode_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_cfg_mode), + }, + {} +}; + +const struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + daemon_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + daemon_support), + }, + {} +}; + +const struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_host_cap_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, + mem_seg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01, + .elem_size = sizeof(struct wlfw_mem_seg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, + mem_seg), + .ei_array = wlfw_mem_seg_s_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, + mem_seg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01, + .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, + mem_seg), + .ei_array = wlfw_mem_seg_resp_s_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = { + {} +}; + +const struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = { + {} +}; + +const struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { + { + 
.data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + cause_for_rejuvenation_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + cause_for_rejuvenation), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + requesting_sub_system_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + requesting_sub_system), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + line_number_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + line_number), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + function_name_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + function_name), + }, + {} +}; + +const struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = { + {} +}; + +const struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_rejuvenate_ack_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01, + mask_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01, + mask), + }, + {} +}; + +const struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, + prev_mask_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, + prev_mask), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + 
.tlv_type = 0x11, + .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, + curr_mask_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, + curr_mask), + }, + {} +}; + +const struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_m3_info_req_msg_v01, + addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_m3_info_req_msg_v01, + size), + }, + {} +}; + +const struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_m3_info_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +const struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_xo_cal_ind_msg_v01, + xo_cal_data), + }, + {} +}; diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h new file mode 100644 index 000000000000..9f311f3bc9e7 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h @@ -0,0 +1,694 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef WCN3990_QMI_SVC_V01_H +#define WCN3990_QMI_SVC_V01_H + +#define WLFW_SERVICE_ID_V01 0x45 +#define WLFW_SERVICE_VERS_V01 0x01 + +#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025 +#define QMI_WLFW_MEM_READY_IND_V01 0x0037 +#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B +#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A +#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034 +#define QMI_WLFW_M3_INFO_REQ_V01 0x003C +#define QMI_WLFW_CAP_REQ_V01 0x0024 +#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038 +#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026 +#define QMI_WLFW_M3_INFO_RESP_V01 0x003C +#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029 +#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027 +#define QMI_WLFW_XO_CAL_IND_V01 0x003D +#define QMI_WLFW_INI_RESP_V01 0x002F +#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026 +#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033 +#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028 +#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034 +#define QMI_WLFW_MSA_READY_IND_V01 0x002B +#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031 +#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022 +#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020 +#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023 +#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 +#define QMI_WLFW_REJUVENATE_IND_V01 0x0039 +#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B +#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031 +#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022 +#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036 +#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C +#define QMI_WLFW_FW_READY_IND_V01 0x0021 +#define QMI_WLFW_MSA_READY_RESP_V01 0x002E +#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029 +#define QMI_WLFW_INI_REQ_V01 0x002F +#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025 +#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A +#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D +#define QMI_WLFW_MSA_READY_REQ_V01 0x002E +#define QMI_WLFW_CAP_RESP_V01 0x0024 +#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A +#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030 +#define QMI_WLFW_VBATT_REQ_V01 0x0032 +#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033 +#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036 +#define QMI_WLFW_VBATT_RESP_V01 0x0032 +#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D +#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027 +#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030 +#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023 +#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020 + +#define QMI_WLFW_MAX_MEM_REG_V01 2 +#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 16 +#define QMI_WLFW_MAX_NUM_CAL_V01 5 +#define QMI_WLFW_MAX_DATA_SIZE_V01 6144 +#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128 +#define QMI_WLFW_MAX_NUM_CE_V01 12 +#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32 +#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144 +#define QMI_WLFW_MAX_NUM_GPIO_V01 32 +#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128 +#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2 +#define QMI_WLFW_MAX_STR_LEN_V01 16 +#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24 +#define QMI_WLFW_MAC_ADDR_SIZE_V01 6 +#define QMI_WLFW_MAX_SHADOW_REG_V2 36 +#define QMI_WLFW_MAX_NUM_SVC_V01 24 + +enum wlfw_driver_mode_enum_v01 { + QMI_WLFW_MISSION_V01 = 0, + QMI_WLFW_FTM_V01 = 1, + QMI_WLFW_EPPING_V01 = 2, + QMI_WLFW_WALTEST_V01 = 3, + QMI_WLFW_OFF_V01 = 4, + QMI_WLFW_CCPM_V01 = 5, + QMI_WLFW_QVIT_V01 = 6, + QMI_WLFW_CALIBRATION_V01 = 7, +}; + +enum wlfw_cal_temp_id_enum_v01 { + QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0, + QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1, + QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2, + QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3, + QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4, +}; + +enum wlfw_pipedir_enum_v01 { + QMI_WLFW_PIPEDIR_NONE_V01 = 0, + 
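+	/* Pipe directions mirror the ath10k copy-engine PIPEDIR_* values,
+	 * seen from the host: IN is target-to-host, OUT is host-to-target.
+	 */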
QMI_WLFW_PIPEDIR_IN_V01 = 1, + QMI_WLFW_PIPEDIR_OUT_V01 = 2, + QMI_WLFW_PIPEDIR_INOUT_V01 = 3, +}; + +enum wlfw_mem_type_enum_v01 { + QMI_WLFW_MEM_TYPE_MSA_V01 = 0, + QMI_WLFW_MEM_TYPE_DDR_V01 = 1, +}; + +#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00) +#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01) +#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02) +#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04) +#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08) +#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10) + +#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL) +#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL) +#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL) +#define QMI_WLFW_MEM_READY_V01 ((u64)0x08ULL) +#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL) + +#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL) + +struct wlfw_ce_tgt_pipe_cfg_s_v01 { + __le32 pipe_num; + __le32 pipe_dir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; +}; + +struct wlfw_ce_svc_pipe_cfg_s_v01 { + __le32 service_id; + __le32 pipe_dir; + __le32 pipe_num; +}; + +struct wlfw_shadow_reg_cfg_s_v01 { + u16 id; + u16 offset; +}; + +struct wlfw_shadow_reg_v2_cfg_s_v01 { + u32 addr; +}; + +struct wlfw_memory_region_info_s_v01 { + u64 region_addr; + u32 size; + u8 secure_flag; +}; + +struct wlfw_mem_cfg_s_v01 { + u64 offset; + u32 size; + u8 secure_flag; +}; + +struct wlfw_mem_seg_s_v01 { + u32 size; + enum wlfw_mem_type_enum_v01 type; + u32 mem_cfg_len; + struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01]; +}; + +struct wlfw_mem_seg_resp_s_v01 { + u64 addr; + u32 size; + enum wlfw_mem_type_enum_v01 type; +}; + +struct wlfw_rf_chip_info_s_v01 { + u32 chip_id; + u32 chip_family; +}; + +struct wlfw_rf_board_info_s_v01 { + u32 board_id; +}; + +struct wlfw_soc_info_s_v01 { + u32 soc_id; +}; + +struct wlfw_fw_version_info_s_v01 { + u32 fw_version; + char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1]; +}; + +struct wlfw_ind_register_req_msg_v01 { + u8 fw_ready_enable_valid; + u8 fw_ready_enable; + u8 initiate_cal_download_enable_valid; + u8 initiate_cal_download_enable; + u8 initiate_cal_update_enable_valid; + u8 initiate_cal_update_enable; + u8 msa_ready_enable_valid; + u8 msa_ready_enable; + u8 pin_connect_result_enable_valid; + u8 pin_connect_result_enable; + u8 client_id_valid; + u32 client_id; + u8 request_mem_enable_valid; + u8 request_mem_enable; + u8 mem_ready_enable_valid; + u8 mem_ready_enable; + u8 fw_init_done_enable_valid; + u8 fw_init_done_enable; + u8 rejuvenate_enable_valid; + u32 rejuvenate_enable; + u8 xo_cal_enable_valid; + u8 xo_cal_enable; +}; + +#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 50 +extern const struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[]; + +struct wlfw_ind_register_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 fw_status_valid; + u64 fw_status; +}; + +#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18 +extern const struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[]; + +struct wlfw_fw_ready_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0 +extern const struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[]; + +struct wlfw_msa_ready_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0 +extern const struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[]; + +struct wlfw_pin_connect_result_ind_msg_v01 { + u8 pwr_pin_result_valid; + u32 pwr_pin_result; + u8 phy_io_pin_result_valid; + u32 phy_io_pin_result; + u8 rf_pin_result_valid; + u32 rf_pin_result; 
+}; + +#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21 +extern const struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[]; + +struct wlfw_wlan_mode_req_msg_v01 { + enum wlfw_driver_mode_enum_v01 mode; + u8 hw_debug_valid; + u8 hw_debug; +}; + +#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11 +extern const struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[]; + +struct wlfw_wlan_mode_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7 +extern const struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[]; + +struct wlfw_wlan_cfg_req_msg_v01 { + u8 host_version_valid; + char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1]; + u8 tgt_cfg_valid; + u32 tgt_cfg_len; + struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01]; + u8 svc_cfg_valid; + u32 svc_cfg_len; + struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01]; + u8 shadow_reg_valid; + u32 shadow_reg_len; + struct wlfw_shadow_reg_cfg_s_v01 shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01]; + u8 shadow_reg_v2_valid; + u32 shadow_reg_v2_len; + struct wlfw_shadow_reg_v2_cfg_s_v01 shadow_reg_v2[QMI_WLFW_MAX_SHADOW_REG_V2]; +}; + +#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803 +extern const struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[]; + +struct wlfw_wlan_cfg_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7 +extern const struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[]; + +struct wlfw_cap_req_msg_v01 { + char placeholder; +}; + +#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0 +extern const struct qmi_elem_info wlfw_cap_req_msg_v01_ei[]; + +struct wlfw_cap_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 chip_info_valid; + struct wlfw_rf_chip_info_s_v01 chip_info; + u8 board_info_valid; + struct wlfw_rf_board_info_s_v01 board_info; + u8 soc_info_valid; + struct wlfw_soc_info_s_v01 soc_info; + u8 fw_version_info_valid; + struct wlfw_fw_version_info_s_v01 fw_version_info; + u8 fw_build_id_valid; + char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1]; + u8 num_macs_valid; + u8 num_macs; +}; + +#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207 +extern const struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[]; + +struct wlfw_bdf_download_req_msg_v01 { + u8 valid; + u8 file_id_valid; + enum wlfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; + u8 bdf_type_valid; + u8 bdf_type; +}; + +#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182 +extern const struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[]; + +struct wlfw_bdf_download_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7 +extern const struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[]; + +struct wlfw_cal_report_req_msg_v01 { + u32 meta_data_len; + enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01]; + u8 xo_cal_data_valid; + u8 xo_cal_data; +}; + +#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28 +extern const struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[]; + +struct wlfw_cal_report_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7 +extern const struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[]; + +struct wlfw_initiate_cal_download_ind_msg_v01 { + enum wlfw_cal_temp_id_enum_v01 cal_id; +}; + +#define 
WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7 +extern const struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[]; + +struct wlfw_cal_download_req_msg_v01 { + u8 valid; + u8 file_id_valid; + enum wlfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; +}; + +#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178 +extern const struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[]; + +struct wlfw_cal_download_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7 +extern const struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[]; + +struct wlfw_initiate_cal_update_ind_msg_v01 { + enum wlfw_cal_temp_id_enum_v01 cal_id; + u32 total_size; +}; + +#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14 +extern const struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[]; + +struct wlfw_cal_update_req_msg_v01 { + enum wlfw_cal_temp_id_enum_v01 cal_id; + u32 seg_id; +}; + +#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14 +extern const struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[]; + +struct wlfw_cal_update_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 file_id_valid; + enum wlfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; +}; + +#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181 +extern const struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[]; + +struct wlfw_msa_info_req_msg_v01 { + u64 msa_addr; + u32 size; +}; + +#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18 +extern const struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[]; + +struct wlfw_msa_info_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u32 mem_region_info_len; + struct wlfw_memory_region_info_s_v01 mem_region_info[QMI_WLFW_MAX_MEM_REG_V01]; +}; + +#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37 +extern const struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[]; + +struct wlfw_msa_ready_req_msg_v01 { + char placeholder; +}; + +#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0 +extern const struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[]; + +struct wlfw_msa_ready_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7 +extern const struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[]; + +struct wlfw_ini_req_msg_v01 { + u8 enablefwlog_valid; + u8 enablefwlog; +}; + +#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4 +extern const struct qmi_elem_info wlfw_ini_req_msg_v01_ei[]; + +struct wlfw_ini_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7 +extern const struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[]; + +struct wlfw_athdiag_read_req_msg_v01 { + u32 offset; + u32 mem_type; + u32 data_len; +}; + +#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21 +extern const struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[]; + +struct wlfw_athdiag_read_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01]; +}; + +#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156 +extern const struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[]; + +struct wlfw_athdiag_write_req_msg_v01 { + u32 offset; + u32 mem_type; + u32 data_len; + u8 
data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
+extern const struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+	u64 voltage_uv;
+};
+
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern const struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+	u8 mac_addr_valid;
+	u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern const struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+	u8 daemon_support_valid;
+	u32 daemon_support;
+	u8 wake_msi_valid;
+	u32 wake_msi;
+	u8 gpios_valid;
+	u32 gpios_len;
+	u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+	u8 nm_modem_valid;
+	u8 nm_modem;
+	u8 bdf_support_valid;
+	u8 bdf_support;
+	u8 bdf_cache_support_valid;
+	u8 bdf_cache_support;
+	u8 m3_support_valid;
+	u8 m3_support;
+	u8 m3_cache_support_valid;
+	u8 m3_cache_support;
+	u8 cal_filesys_support_valid;
+	u8 cal_filesys_support;
+	u8 cal_cache_support_valid;
+	u8 cal_cache_support;
+	u8 cal_done_valid;
+	u8 cal_done;
+	u8 mem_bucket_valid;
+	u32 mem_bucket;
+	u8 mem_cfg_mode_valid;
+	u8 mem_cfg_mode;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
+extern const struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+	u32 mem_seg_len;
+	struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 564
+extern const struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+	u32 mem_seg_len;
+	struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 260
+extern const struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_mem_ready_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern const struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_fw_init_done_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern const struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+	u8 cause_for_rejuvenation_valid;
+	u8 cause_for_rejuvenation;
+	u8 requesting_sub_system_valid;
+	u8 requesting_sub_system;
+	u8 
line_number_valid; + u16 line_number; + u8 function_name_valid; + char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1]; +}; + +#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144 +extern const struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[]; + +struct wlfw_rejuvenate_ack_req_msg_v01 { + char placeholder; +}; + +#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0 +extern const struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[]; + +struct wlfw_rejuvenate_ack_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7 +extern const struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[]; + +struct wlfw_dynamic_feature_mask_req_msg_v01 { + u8 mask_valid; + u64 mask; +}; + +#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11 +extern const struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[]; + +struct wlfw_dynamic_feature_mask_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 prev_mask_valid; + u64 prev_mask; + u8 curr_mask_valid; + u64 curr_mask; +}; + +#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29 +extern const struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[]; + +struct wlfw_m3_info_req_msg_v01 { + u64 addr; + u32 size; +}; + +#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18 +extern const struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[]; + +struct wlfw_m3_info_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7 +extern const struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[]; + +struct wlfw_xo_cal_ind_msg_v01 { + u8 xo_cal_data; +}; + +#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4 +extern const struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[]; + +#endif diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index bfec6c8f2ecb..564293df1e9a 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -1,56 +1,48 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef _RX_DESC_H_ #define _RX_DESC_H_ +#include <linux/bitops.h> + enum rx_attention_flags { - RX_ATTENTION_FLAGS_FIRST_MPDU = 1 << 0, - RX_ATTENTION_FLAGS_LAST_MPDU = 1 << 1, - RX_ATTENTION_FLAGS_MCAST_BCAST = 1 << 2, - RX_ATTENTION_FLAGS_PEER_IDX_INVALID = 1 << 3, - RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = 1 << 4, - RX_ATTENTION_FLAGS_POWER_MGMT = 1 << 5, - RX_ATTENTION_FLAGS_NON_QOS = 1 << 6, - RX_ATTENTION_FLAGS_NULL_DATA = 1 << 7, - RX_ATTENTION_FLAGS_MGMT_TYPE = 1 << 8, - RX_ATTENTION_FLAGS_CTRL_TYPE = 1 << 9, - RX_ATTENTION_FLAGS_MORE_DATA = 1 << 10, - RX_ATTENTION_FLAGS_EOSP = 1 << 11, - RX_ATTENTION_FLAGS_U_APSD_TRIGGER = 1 << 12, - RX_ATTENTION_FLAGS_FRAGMENT = 1 << 13, - RX_ATTENTION_FLAGS_ORDER = 1 << 14, - RX_ATTENTION_FLAGS_CLASSIFICATION = 1 << 15, - RX_ATTENTION_FLAGS_OVERFLOW_ERR = 1 << 16, - RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = 1 << 17, - RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18, - RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = 1 << 19, - RX_ATTENTION_FLAGS_SA_IDX_INVALID = 1 << 20, - RX_ATTENTION_FLAGS_DA_IDX_INVALID = 1 << 21, - RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = 1 << 22, - RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = 1 << 23, - RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = 1 << 24, - RX_ATTENTION_FLAGS_DIRECTED = 1 << 25, - RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = 1 << 26, - RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = 1 << 27, - RX_ATTENTION_FLAGS_TKIP_MIC_ERR = 1 << 28, - RX_ATTENTION_FLAGS_DECRYPT_ERR = 1 << 29, - RX_ATTENTION_FLAGS_FCS_ERR = 1 << 30, - RX_ATTENTION_FLAGS_MSDU_DONE = 1 << 31, + RX_ATTENTION_FLAGS_FIRST_MPDU = BIT(0), + RX_ATTENTION_FLAGS_LAST_MPDU = BIT(1), + RX_ATTENTION_FLAGS_MCAST_BCAST = BIT(2), + RX_ATTENTION_FLAGS_PEER_IDX_INVALID = BIT(3), + RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = BIT(4), + RX_ATTENTION_FLAGS_POWER_MGMT = BIT(5), + RX_ATTENTION_FLAGS_NON_QOS = BIT(6), + RX_ATTENTION_FLAGS_NULL_DATA = BIT(7), + RX_ATTENTION_FLAGS_MGMT_TYPE = BIT(8), + RX_ATTENTION_FLAGS_CTRL_TYPE = BIT(9), + RX_ATTENTION_FLAGS_MORE_DATA = BIT(10), + RX_ATTENTION_FLAGS_EOSP = BIT(11), + RX_ATTENTION_FLAGS_U_APSD_TRIGGER = BIT(12), + RX_ATTENTION_FLAGS_FRAGMENT = BIT(13), + RX_ATTENTION_FLAGS_ORDER = BIT(14), + RX_ATTENTION_FLAGS_CLASSIFICATION = BIT(15), + RX_ATTENTION_FLAGS_OVERFLOW_ERR = BIT(16), + RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = BIT(17), + RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = BIT(18), + RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = BIT(19), + RX_ATTENTION_FLAGS_SA_IDX_INVALID = BIT(20), + RX_ATTENTION_FLAGS_DA_IDX_INVALID = BIT(21), + RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = BIT(22), + RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = BIT(23), + RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = BIT(24), + RX_ATTENTION_FLAGS_DIRECTED = BIT(25), + RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = BIT(26), + RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = BIT(27), + RX_ATTENTION_FLAGS_TKIP_MIC_ERR = BIT(28), + RX_ATTENTION_FLAGS_DECRYPT_ERR = BIT(29), + RX_ATTENTION_FLAGS_FCS_ERR = BIT(30), + RX_ATTENTION_FLAGS_MSDU_DONE = BIT(31), }; struct rx_attention { @@ -77,7 +69,7 @@ struct rx_attention { * first_msdu is set. * * peer_idx_invalid - * Indicates no matching entries within the the max search + * Indicates no matching entries within the max search * count. Only set when first_msdu is set. * * peer_idx_timeout @@ -205,13 +197,31 @@ struct rx_attention { * descriptor. 
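 *
 * Editorial sketch, not part of this patch: once the attention word has been
 * converted from little endian, the BIT() flags above can be tested directly.
 * Assuming attn points at the descriptor's attention block and its flags word
 * is a __le32 (as struct rx_attention is laid out):
 *
 *	u32 flags = __le32_to_cpu(attn->flags);
 *	bool done = flags & RX_ATTENTION_FLAGS_MSDU_DONE;
 *	bool bad_fcs = flags & RX_ATTENTION_FLAGS_FCS_ERR;
 *
 * msdu_done should be set before the rest of the descriptor is trusted.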
*/ -struct rx_frag_info { +struct rx_frag_info_common { u8 ring0_more_count; u8 ring1_more_count; u8 ring2_more_count; u8 ring3_more_count; } __packed; +struct rx_frag_info_wcn3990 { + u8 ring4_more_count; + u8 ring5_more_count; + u8 ring6_more_count; + u8 ring7_more_count; +} __packed; + +struct rx_frag_info { + struct rx_frag_info_common common; + union { + struct rx_frag_info_wcn3990 wcn3990; + } __packed; +} __packed; + +struct rx_frag_info_v1 { + struct rx_frag_info_common common; +} __packed; + /* * ring0_more_count * Indicates the number of more buffers associated with RX DMA @@ -239,6 +249,9 @@ enum htt_rx_mpdu_encrypt_type { HTT_RX_MPDU_ENCRYPT_WAPI = 5, HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6, HTT_RX_MPDU_ENCRYPT_NONE = 7, + HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2 = 8, + HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2 = 9, + HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10, }; #define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff @@ -247,15 +260,15 @@ enum htt_rx_mpdu_encrypt_type { #define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16 #define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000 #define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28 -#define RX_MPDU_START_INFO0_FROM_DS (1 << 11) -#define RX_MPDU_START_INFO0_TO_DS (1 << 12) -#define RX_MPDU_START_INFO0_ENCRYPTED (1 << 13) -#define RX_MPDU_START_INFO0_RETRY (1 << 14) -#define RX_MPDU_START_INFO0_TXBF_H_INFO (1 << 15) +#define RX_MPDU_START_INFO0_FROM_DS BIT(11) +#define RX_MPDU_START_INFO0_TO_DS BIT(12) +#define RX_MPDU_START_INFO0_ENCRYPTED BIT(13) +#define RX_MPDU_START_INFO0_RETRY BIT(14) +#define RX_MPDU_START_INFO0_TXBF_H_INFO BIT(15) #define RX_MPDU_START_INFO1_TID_MASK 0xf0000000 #define RX_MPDU_START_INFO1_TID_LSB 28 -#define RX_MPDU_START_INFO1_DIRECTED (1 << 16) +#define RX_MPDU_START_INFO1_DIRECTED BIT(16) struct rx_mpdu_start { __le32 info0; @@ -350,13 +363,13 @@ struct rx_mpdu_start { #define RX_MPDU_END_INFO0_RESERVED_0_LSB 0 #define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000 #define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB 16 -#define RX_MPDU_END_INFO0_OVERFLOW_ERR (1 << 13) -#define RX_MPDU_END_INFO0_LAST_MPDU (1 << 14) -#define RX_MPDU_END_INFO0_POST_DELIM_ERR (1 << 15) -#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR (1 << 28) -#define RX_MPDU_END_INFO0_TKIP_MIC_ERR (1 << 29) -#define RX_MPDU_END_INFO0_DECRYPT_ERR (1 << 30) -#define RX_MPDU_END_INFO0_FCS_ERR (1 << 31) +#define RX_MPDU_END_INFO0_OVERFLOW_ERR BIT(13) +#define RX_MPDU_END_INFO0_LAST_MPDU BIT(14) +#define RX_MPDU_END_INFO0_POST_DELIM_ERR BIT(15) +#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR BIT(28) +#define RX_MPDU_END_INFO0_TKIP_MIC_ERR BIT(29) +#define RX_MPDU_END_INFO0_DECRYPT_ERR BIT(30) +#define RX_MPDU_END_INFO0_FCS_ERR BIT(31) struct rx_mpdu_end { __le32 info0; @@ -415,26 +428,78 @@ struct rx_mpdu_end { #define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB 8 #define RX_MSDU_START_INFO1_SA_IDX_MASK 0x07ff0000 #define RX_MSDU_START_INFO1_SA_IDX_LSB 16 -#define RX_MSDU_START_INFO1_IPV4_PROTO (1 << 10) -#define RX_MSDU_START_INFO1_IPV6_PROTO (1 << 11) -#define RX_MSDU_START_INFO1_TCP_PROTO (1 << 12) -#define RX_MSDU_START_INFO1_UDP_PROTO (1 << 13) -#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14) -#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15) +#define RX_MSDU_START_INFO1_IPV4_PROTO BIT(10) +#define RX_MSDU_START_INFO1_IPV6_PROTO BIT(11) +#define RX_MSDU_START_INFO1_TCP_PROTO BIT(12) +#define RX_MSDU_START_INFO1_UDP_PROTO BIT(13) +#define RX_MSDU_START_INFO1_IP_FRAG BIT(14) +#define RX_MSDU_START_INFO1_TCP_ONLY_ACK BIT(15) +#define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff +#define 
RX_MSDU_START_INFO2_DA_IDX_LSB 0 +#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000 +#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB 16 +#define RX_MSDU_START_INFO2_DA_BCAST_MCAST BIT(11) + +/* The decapped header (rx_hdr_status) contains the following: + * a) 802.11 header + * [padding to 4 bytes] + * b) HW crypto parameter + * - 0 bytes for no security + * - 4 bytes for WEP + * - 8 bytes for TKIP, AES + * [padding to 4 bytes] + * c) A-MSDU subframe header (14 bytes) if applicable + * d) LLC/SNAP (RFC1042, 8 bytes) + * + * In case of A-MSDU only first frame in sequence contains (a) and (b). + */ enum rx_msdu_decap_format { - RX_MSDU_DECAP_RAW = 0, - RX_MSDU_DECAP_NATIVE_WIFI = 1, + RX_MSDU_DECAP_RAW = 0, + + /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in + * htt_rx_desc contains the original decapped 802.11 header. + */ + RX_MSDU_DECAP_NATIVE_WIFI = 1, + + /* Payload contains an ethernet header (struct ethhdr). */ RX_MSDU_DECAP_ETHERNET2_DIX = 2, + + /* Payload contains two 48-bit addresses and 2-byte length (14 bytes + * total), followed by an RFC1042 header (8 bytes). + */ RX_MSDU_DECAP_8023_SNAP_LLC = 3 }; -struct rx_msdu_start { +struct rx_msdu_start_common { __le32 info0; /* %RX_MSDU_START_INFO0_ */ __le32 flow_id_crc; __le32 info1; /* %RX_MSDU_START_INFO1_ */ } __packed; +struct rx_msdu_start_qca99x0 { + __le32 info2; /* %RX_MSDU_START_INFO2_ */ +} __packed; + +struct rx_msdu_start_wcn3990 { + __le32 info2; /* %RX_MSDU_START_INFO2_ */ + __le32 info3; /* %RX_MSDU_START_INFO3_ */ +} __packed; + +struct rx_msdu_start { + struct rx_msdu_start_common common; + union { + struct rx_msdu_start_wcn3990 wcn3990; + } __packed; +} __packed; + +struct rx_msdu_start_v1 { + struct rx_msdu_start_common common; + union { + struct rx_msdu_start_qca99x0 qca99x0; + } __packed; +} __packed; + /* * msdu_length * MSDU length in bytes after decapsulation. 
This field is @@ -515,12 +580,13 @@ struct rx_msdu_start { #define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff #define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0 -#define RX_MSDU_END_INFO0_FIRST_MSDU (1 << 14) -#define RX_MSDU_END_INFO0_LAST_MSDU (1 << 15) -#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30) -#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31) +#define RX_MSDU_END_INFO0_FIRST_MSDU BIT(14) +#define RX_MSDU_END_INFO0_LAST_MSDU BIT(15) +#define RX_MSDU_END_INFO0_MSDU_LIMIT_ERR BIT(18) +#define RX_MSDU_END_INFO0_PRE_DELIM_ERR BIT(30) +#define RX_MSDU_END_INFO0_RESERVED_3B BIT(31) -struct rx_msdu_end { +struct rx_msdu_end_common { __le16 ip_hdr_cksum; __le16 tcp_hdr_cksum; u8 key_id_octet; @@ -529,6 +595,55 @@ struct rx_msdu_end { __le32 info0; } __packed; +#define RX_MSDU_END_INFO1_TCP_FLAG_MASK 0x000001ff +#define RX_MSDU_END_INFO1_TCP_FLAG_LSB 0 +#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK 0x00001c00 +#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB 10 +#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK 0xffff0000 +#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB 16 +#define RX_MSDU_END_INFO1_IRO_ELIGIBLE BIT(9) + +#define RX_MSDU_END_INFO2_DA_OFFSET_MASK 0x0000003f +#define RX_MSDU_END_INFO2_DA_OFFSET_LSB 0 +#define RX_MSDU_END_INFO2_SA_OFFSET_MASK 0x00000fc0 +#define RX_MSDU_END_INFO2_SA_OFFSET_LSB 6 +#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK 0x0003f000 +#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB 12 + +struct rx_msdu_end_qca99x0 { + __le32 ipv6_crc; + __le32 tcp_seq_no; + __le32 tcp_ack_no; + __le32 info1; + __le32 info2; +} __packed; + +struct rx_msdu_end_wcn3990 { + __le32 ipv6_crc; + __le32 tcp_seq_no; + __le32 tcp_ack_no; + __le32 info1; + __le32 info2; + __le32 rule_indication_0; + __le32 rule_indication_1; + __le32 rule_indication_2; + __le32 rule_indication_3; +} __packed; + +struct rx_msdu_end { + struct rx_msdu_end_common common; + union { + struct rx_msdu_end_wcn3990 wcn3990; + } __packed; +} __packed; + +struct rx_msdu_end_v1 { + struct rx_msdu_end_common common; + union { + struct rx_msdu_end_qca99x0 qca99x0; + } __packed; +} __packed; + /* *ip_hdr_chksum * This can include the IP header checksum or the pseudo header @@ -578,6 +693,12 @@ struct rx_msdu_end { * Indicates the last MSDU of the A-MSDU. MPDU end status is * only valid when last_msdu is set. * + *msdu_limit_error + * Indicates that the MSDU threshold was exceeded and thus + * all the rest of the MSDUs will not be scattered and + * will not be decapsulated but will be received in RAW format + * as a single MSDU buffer. + * *reserved_3a * Reserved: HW should fill with zero. FW should ignore. * @@ -589,33 +710,13 @@ struct rx_msdu_end { * Reserved: HW should fill with zero. FW should ignore. 
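 *
 * Editorial sketch, not part of this patch: the _MASK/_LSB pairs above are
 * typically consumed with the driver's MS() field-extraction helper (see
 * hw.h). For a struct rx_msdu_end *msdu_end:
 *
 *	u32 info0 = __le32_to_cpu(msdu_end->common.info0);
 *	u16 mpdu_len = MS(info0, RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH);
 *	bool first = info0 & RX_MSDU_END_INFO0_FIRST_MSDU;
 *	bool last = info0 & RX_MSDU_END_INFO0_LAST_MSDU;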
*/ -#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0 -#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1 - -#define RX_PPDU_START_SIG_RATE_OFDM_48 0 -#define RX_PPDU_START_SIG_RATE_OFDM_24 1 -#define RX_PPDU_START_SIG_RATE_OFDM_12 2 -#define RX_PPDU_START_SIG_RATE_OFDM_6 3 -#define RX_PPDU_START_SIG_RATE_OFDM_54 4 -#define RX_PPDU_START_SIG_RATE_OFDM_36 5 -#define RX_PPDU_START_SIG_RATE_OFDM_18 6 -#define RX_PPDU_START_SIG_RATE_OFDM_9 7 - -#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0 -#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1 -#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2 -#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3 -#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4 -#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5 -#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6 - #define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04 #define HTT_RX_PPDU_START_PREAMBLE_HT 0x08 #define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09 #define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C #define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D -#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0) +#define RX_PPDU_START_INFO0_IS_GREENFIELD BIT(0) #define RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f #define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0 @@ -625,15 +726,15 @@ struct rx_msdu_end { #define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18 #define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000 #define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24 -#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT (1 << 4) -#define RX_PPDU_START_INFO1_L_SIG_PARITY (1 << 17) +#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT BIT(4) +#define RX_PPDU_START_INFO1_L_SIG_PARITY BIT(17) #define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff #define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0 #define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff #define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0 -#define RX_PPDU_START_INFO3_TXBF_H_INFO (1 << 24) +#define RX_PPDU_START_INFO3_TXBF_H_INFO BIT(24) #define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff #define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0 @@ -641,6 +742,9 @@ struct rx_msdu_end { #define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff #define RX_PPDU_START_INFO5_SERVICE_LSB 0 +/* No idea what this flag means. It seems to be always set in rate. */ +#define RX_PPDU_START_RATE_FLAG BIT(3) + struct rx_ppdu_start { struct { u8 pri20_mhz; @@ -817,21 +921,24 @@ struct rx_ppdu_start { * * reserved_9 * Reserved: HW should fill with 0, FW should ignore. 
-*/ - + */ -#define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0) -#define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1) -#define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2) +#define RX_PPDU_END_FLAGS_PHY_ERR BIT(0) +#define RX_PPDU_END_FLAGS_RX_LOCATION BIT(1) +#define RX_PPDU_END_FLAGS_TXBF_H_INFO BIT(2) #define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff #define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0 -#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24) -#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25) +#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK BIT(24) +#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL BIT(25) -#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15) +#define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc +#define RX_PPDU_END_INFO1_PEER_IDX_LSB 2 +#define RX_PPDU_END_INFO1_BB_DATA BIT(0) +#define RX_PPDU_END_INFO1_PEER_IDX_VALID BIT(1) +#define RX_PPDU_END_INFO1_PPDU_DONE BIT(15) -struct rx_ppdu_end { +struct rx_ppdu_end_common { __le32 evm_p0; __le32 evm_p1; __le32 evm_p2; @@ -850,6 +957,9 @@ struct rx_ppdu_end { __le32 evm_p15; __le32 tsf_timestamp; __le32 wb_timestamp; +} __packed; + +struct rx_ppdu_end_qca988x { u8 locationing_timestamp; u8 phy_err_code; __le16 flags; /* %RX_PPDU_END_FLAGS_ */ @@ -858,6 +968,215 @@ struct rx_ppdu_end { __le16 info1; /* %RX_PPDU_END_INFO1_ */ } __packed; +#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff +#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0 +#define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000 +#define RX_PPDU_END_RTT_UNUSED_LSB 24 +#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31) + +struct rx_ppdu_end_qca6174 { + u8 locationing_timestamp; + u8 phy_err_code; + __le16 flags; /* %RX_PPDU_END_FLAGS_ */ + __le32 info0; /* %RX_PPDU_END_INFO0_ */ + __le32 rtt; /* %RX_PPDU_END_RTT_ */ + __le16 bb_length; + __le16 info1; /* %RX_PPDU_END_INFO1_ */ +} __packed; + +#define RX_PKT_END_INFO0_RX_SUCCESS BIT(0) +#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX BIT(3) +#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP BIT(4) +#define RX_PKT_END_INFO0_ERR_OFDM_RESTART BIT(5) +#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP BIT(6) +#define RX_PKT_END_INFO0_ERR_CCK_RESTART BIT(7) + +#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK 0x0001ffff +#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB 0 +#define RX_LOCATION_INFO_FAC_STATUS_MASK 0x000c0000 +#define RX_LOCATION_INFO_FAC_STATUS_LSB 18 +#define RX_LOCATION_INFO_PKT_BW_MASK 0x00700000 +#define RX_LOCATION_INFO_PKT_BW_LSB 20 +#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000 +#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB 23 +#define RX_LOCATION_INFO_CIR_STATUS BIT(17) +#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE BIT(25) +#define RX_LOCATION_INFO_RTT_TX_DATA_START_X BIT(26) +#define RX_LOCATION_INFO_HW_IFFT_MODE BIT(30) +#define RX_LOCATION_INFO_RX_LOCATION_VALID BIT(31) + +struct rx_pkt_end { + __le32 info0; /* %RX_PKT_END_INFO0_ */ + __le32 phy_timestamp_1; + __le32 phy_timestamp_2; +} __packed; + +struct rx_pkt_end_wcn3990 { + __le32 info0; /* %RX_PKT_END_INFO0_ */ + __le64 phy_timestamp_1; + __le64 phy_timestamp_2; +} __packed; + +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0 +#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000 +#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB 15 +#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK 0xc0000000 +#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB 30 +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS BIT(14) +#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS BIT(29) + +#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK 0x0000000c +#define 
RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB 2 +#define RX_LOCATION_INFO1_PKT_BW_MASK 0x00000030 +#define RX_LOCATION_INFO1_PKT_BW_LSB 4 +#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK 0x0000ff00 +#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB 8 +#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK 0x000f0000 +#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB 16 +#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK 0x00300000 +#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB 20 +#define RX_LOCATION_INFO1_TIMING_BACKOFF_MASK 0x07c00000 +#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB 22 +#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK 0x18000000 +#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB 27 +#define RX_LOCATION_INFO1_RTT_CFR_STATUS BIT(0) +#define RX_LOCATION_INFO1_RTT_CIR_STATUS BIT(1) +#define RX_LOCATION_INFO1_RTT_GI_TYPE BIT(7) +#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE BIT(29) +#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE BIT(30) +#define RX_LOCATION_INFO1_RX_LOCATION_VALID BIT(31) + +struct rx_location_info { + __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */ + __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */ +} __packed; + +struct rx_location_info_wcn3990 { + __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */ + __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */ + __le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */ +} __packed; + +enum rx_phy_ppdu_end_info0 { + RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2), + RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3), + RX_PHY_PPDU_END_INFO0_ERR_RX_NAP = BIT(4), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING = BIT(5), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY = BIT(6), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE = BIT(7), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH = BIT(8), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART = BIT(9), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE = BIT(10), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11), + RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER = BIT(12), + RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING = BIT(13), + RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC = BIT(14), + RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE = BIT(15), + RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH = BIT(16), + RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART = BIT(17), + RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE = BIT(18), + RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP = BIT(19), + RX_PHY_PPDU_END_INFO0_ERR_HT_CRC = BIT(20), + RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH = BIT(21), + RX_PHY_PPDU_END_INFO0_ERR_HT_RATE = BIT(22), + RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF = BIT(23), + RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24), + RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD = BIT(25), + RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN = BIT(26), + RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW = BIT(27), + RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28), + RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC = BIT(29), + RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA = BIT(30), + RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG = BIT(31), +}; + +enum rx_phy_ppdu_end_info1 { + RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP = BIT(0), + RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM = BIT(1), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM = BIT(2), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0 = BIT(3), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63 = BIT(5), + RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER = BIT(6), + RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP = BIT(7), + RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT = BIT(8), + RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK = BIT(9), + RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION = BIT(10), + RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK = BIT(11), + 
RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX = BIT(12), + RX_PHY_PPDU_END_INFO1_ERR_RX_CBF = BIT(13), +}; + +struct rx_phy_ppdu_end { + __le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */ + __le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */ +} __packed; + +#define RX_PPDU_END_RX_TIMING_OFFSET_MASK 0x00000fff +#define RX_PPDU_END_RX_TIMING_OFFSET_LSB 0 + +#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK 0x00ffffff +#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB 0 +#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK BIT(24) +#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID BIT(25) +#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID BIT(26) +#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27) +#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL BIT(28) +#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC BIT(29) +#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE BIT(30) + +struct rx_ppdu_end_qca99x0 { + struct rx_pkt_end rx_pkt_end; + __le32 rx_location_info; /* %RX_LOCATION_INFO_ */ + struct rx_phy_ppdu_end rx_phy_ppdu_end; + __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */ + __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */ + __le16 bb_length; + __le16 info1; /* %RX_PPDU_END_INFO1_ */ +} __packed; + +struct rx_ppdu_end_qca9984 { + struct rx_pkt_end rx_pkt_end; + struct rx_location_info rx_location_info; + struct rx_phy_ppdu_end rx_phy_ppdu_end; + __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */ + __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */ + __le16 bb_length; + __le16 info1; /* %RX_PPDU_END_INFO1_ */ +} __packed; + +struct rx_ppdu_end_wcn3990 { + struct rx_pkt_end_wcn3990 rx_pkt_end; + struct rx_location_info_wcn3990 rx_location_info; + struct rx_phy_ppdu_end rx_phy_ppdu_end; + __le32 rx_timing_offset; + __le32 reserved_info_0; + __le32 reserved_info_1; + __le32 rx_antenna_info; + __le32 rx_coex_info; + __le32 rx_mpdu_cnt_info; + __le64 phy_timestamp_tx; + __le32 rx_bb_length; +} __packed; + +struct rx_ppdu_end { + struct rx_ppdu_end_common common; + union { + struct rx_ppdu_end_wcn3990 wcn3990; + } __packed; +} __packed; + +struct rx_ppdu_end_v1 { + struct rx_ppdu_end_common common; + union { + struct rx_ppdu_end_qca988x qca988x; + struct rx_ppdu_end_qca6174 qca6174; + struct rx_ppdu_end_qca99x0 qca99x0; + struct rx_ppdu_end_qca9984 qca9984; + } __packed; +} __packed; + /* * evm_p0 * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3. @@ -975,11 +1294,11 @@ struct rx_ppdu_end { * Every time HW sets this bit in memory FW/SW must clear this * bit in memory. FW will initialize all the ppdu_done dword * to 0. 
-*/ + */ -#define FW_RX_DESC_INFO0_DISCARD (1 << 0) -#define FW_RX_DESC_INFO0_FORWARD (1 << 1) -#define FW_RX_DESC_INFO0_INSPECT (1 << 5) +#define FW_RX_DESC_INFO0_DISCARD BIT(0) +#define FW_RX_DESC_INFO0_FORWARD BIT(1) +#define FW_RX_DESC_INFO0_INSPECT BIT(5) #define FW_RX_DESC_INFO0_EXT_MASK 0xC0 #define FW_RX_DESC_INFO0_EXT_LSB 6 @@ -987,4 +1306,31 @@ struct fw_rx_desc_base { u8 info0; } __packed; +#define FW_RX_DESC_FLAGS_FIRST_MSDU (1 << 0) +#define FW_RX_DESC_FLAGS_LAST_MSDU (1 << 1) +#define FW_RX_DESC_C3_FAILED (1 << 2) +#define FW_RX_DESC_C4_FAILED (1 << 3) +#define FW_RX_DESC_IPV6 (1 << 4) +#define FW_RX_DESC_TCP (1 << 5) +#define FW_RX_DESC_UDP (1 << 6) + +struct fw_rx_desc_hl { + union { + struct { + u8 discard:1, + forward:1, + any_err:1, + dup_err:1, + reserved:1, + inspect:1, + extension:2; + } bits; + u8 info0; + } u; + + u8 version; + u8 len; + u8 flags; +} __packed; + #endif /* _RX_DESC_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c new file mode 100644 index 000000000000..c06d50db40b8 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -0,0 +1,2678 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. + * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com> + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/mmc/card.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/host.h> +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/sd.h> +#include <linux/bitfield.h> +#include "core.h" +#include "bmi.h" +#include "debug.h" +#include "hif.h" +#include "htc.h" +#include "mac.h" +#include "targaddrs.h" +#include "trace.h" +#include "sdio.h" +#include "coredump.h" + +void ath10k_sdio_fw_crashed_dump(struct ath10k *ar); + +#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024) + +/* inlined helper functions */ + +static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio, + size_t len) +{ + return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask); +} + +static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id) +{ + return (enum ath10k_htc_ep_id)pipe_id; +} + +static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt) +{ + dev_kfree_skb(pkt->skb); + pkt->skb = NULL; + pkt->alloc_len = 0; + pkt->act_len = 0; + pkt->trailer_only = false; +} + +static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt, + size_t act_len, size_t full_len, + bool part_of_bundle, + bool last_in_bundle) +{ + pkt->skb = dev_alloc_skb(full_len); + if (!pkt->skb) + return -ENOMEM; + + pkt->act_len = act_len; + pkt->alloc_len = full_len; + pkt->part_of_bundle = part_of_bundle; + pkt->last_in_bundle = last_in_bundle; + pkt->trailer_only = false; + + return 0; +} + +static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt) +{ + bool trailer_only = false; + struct ath10k_htc_hdr *htc_hdr = + (struct ath10k_htc_hdr *)pkt->skb->data; + u16 len = __le16_to_cpu(htc_hdr->len); + + if (len == htc_hdr->trailer_len) + trailer_only = true; + + return trailer_only; +} + +/* sdio/mmc functions */ + +static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw, + unsigned int address, + unsigned char val) +{ + *arg = FIELD_PREP(BIT(31), write) | + FIELD_PREP(BIT(27), raw) | + FIELD_PREP(BIT(26), 1) | + FIELD_PREP(GENMASK(25, 9), 
address) | + FIELD_PREP(BIT(8), 1) | + FIELD_PREP(GENMASK(7, 0), val); +} + +static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card, + unsigned int address, + unsigned char byte) +{ + struct mmc_command io_cmd; + + memset(&io_cmd, 0, sizeof(io_cmd)); + ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte); + io_cmd.opcode = SD_IO_RW_DIRECT; + io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; + + return mmc_wait_for_cmd(card->host, &io_cmd, 0); +} + +static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card, + unsigned int address, + unsigned char *byte) +{ + struct mmc_command io_cmd; + int ret; + + memset(&io_cmd, 0, sizeof(io_cmd)); + ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0); + io_cmd.opcode = SD_IO_RW_DIRECT; + io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; + + ret = mmc_wait_for_cmd(card->host, &io_cmd, 0); + if (!ret) + *byte = io_cmd.resp[0]; + + return ret; +} + +static int ath10k_sdio_config(struct ath10k *ar) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + unsigned char byte, asyncintdelay = 2; + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n"); + + sdio_claim_host(func); + + byte = 0; + ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, + SDIO_CCCR_DRIVE_STRENGTH, + &byte); + + byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK; + byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK, + ATH10K_SDIO_DRIVE_DTSX_TYPE_D); + + ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, + SDIO_CCCR_DRIVE_STRENGTH, + byte); + + byte = 0; + ret = ath10k_sdio_func0_cmd52_rd_byte( + func->card, + CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR, + &byte); + + byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A | + CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C | + CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D); + + ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, + CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR, + byte); + if (ret) { + ath10k_warn(ar, "failed to enable driver strength: %d\n", ret); + goto out; + } + + byte = 0; + ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_SDIO3, + &byte); + + byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3; + + ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_SDIO3, + byte); + if (ret) { + ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n", + ret); + goto out; + } + + byte = 0; + ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, + CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, + &byte); + + byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK; + byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay); + + ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, + CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, + byte); + + /* give us some time to enable, in ms */ + func->enable_timeout = 100; + + ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size); + if (ret) { + ath10k_warn(ar, "failed to set sdio block size to %d: %d\n", + ar_sdio->mbox_info.block_size, ret); + goto out; + } + +out: + sdio_release_host(func); + return ret; +} + +static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + int ret; + + sdio_claim_host(func); + + sdio_writel(func, val, addr, &ret); + if (ret) { + ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n", + val, addr, ret); + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n", + addr, val); + +out: + sdio_release_host(func); + + return ret; +} + +static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val) +{ + struct 
ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + __le32 *buf; + int ret; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + *buf = cpu_to_le32(val); + + sdio_claim_host(func); + + ret = sdio_writesb(func, addr, buf, sizeof(*buf)); + if (ret) { + ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n", + val, addr, ret); + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n", + addr, val); + +out: + sdio_release_host(func); + + kfree(buf); + + return ret; +} + +static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + int ret; + + sdio_claim_host(func); + *val = sdio_readl(func, addr, &ret); + if (ret) { + ath10k_warn(ar, "failed to read from address 0x%x: %d\n", + addr, ret); + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n", + addr, *val); + +out: + sdio_release_host(func); + + return ret; +} + +static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + int ret; + + sdio_claim_host(func); + + ret = sdio_memcpy_fromio(func, buf, addr, len); + if (ret) { + ath10k_warn(ar, "failed to read from address 0x%x: %d\n", + addr, ret); + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n", + addr, buf, len); + ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len); + +out: + sdio_release_host(func); + + return ret; +}
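/* Editorial sketch, not part of this patch: the 32-bit accessors above
 * compose into the usual read-modify-write sequence. The function name
 * below is hypothetical and for illustration only.
 */
static int __maybe_unused ath10k_sdio_example_set_bits(struct ath10k *ar,
						       u32 addr, u32 bits)
{
	u32 val;
	int ret;

	/* fetch the current register value over CMD53 */
	ret = ath10k_sdio_read32(ar, addr, &val);
	if (ret)
		return ret;

	/* write it back with the requested bits set */
	return ath10k_sdio_write32(ar, addr, val | bits);
}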
+ +static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + int ret; + + sdio_claim_host(func); + + /* For some reason toio() doesn't have const for the buffer, need + * an ugly hack to work around that. + */ + ret = sdio_memcpy_toio(func, addr, (void *)buf, len); + if (ret) { + ath10k_warn(ar, "failed to write to address 0x%x: %d\n", + addr, ret); + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n", + addr, buf, len); + ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len); + +out: + sdio_release_host(func); + + return ret; +} + +static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct sdio_func *func = ar_sdio->func; + int ret; + + sdio_claim_host(func); + + len = round_down(len, ar_sdio->mbox_info.block_size); + + ret = sdio_readsb(func, buf, addr, len); + if (ret) { + ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n", + addr, ret); + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n", + addr, buf, len); + ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len); + +out: + sdio_release_host(func); + + return ret; +} + +/* HIF mbox functions */ + +static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar, + struct ath10k_sdio_rx_data *pkt, + u32 *lookaheads, + int *n_lookaheads) +{ + struct ath10k_htc *htc = &ar->htc; + struct sk_buff *skb = pkt->skb; + struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data; + bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT; + enum ath10k_htc_ep_id eid; + u8 *trailer; + int ret; + + if (trailer_present) { + trailer = skb->data + skb->len - htc_hdr->trailer_len; + + eid = pipe_id_to_eid(htc_hdr->eid); + + ret = ath10k_htc_process_trailer(htc, + trailer, + htc_hdr->trailer_len, + eid, + lookaheads, + n_lookaheads); + if (ret) + return ret; + + if (is_trailer_only_msg(pkt)) + pkt->trailer_only = true; + + skb_trim(skb, skb->len - htc_hdr->trailer_len); + } + + skb_pull(skb, sizeof(*htc_hdr)); + + return 0; +}
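/* Editorial note, not part of this patch: the trailer handling above relies
 * on the HTC trailer sitting at the tail of the payload:
 *
 *	[ath10k_htc_hdr][ payload ......... ][ trailer ]
 *	                |<------------- len ---------->|
 *	                                      trailer_len
 *
 * which is why a message is "trailer only" exactly when the header's len
 * equals its trailer_len (see is_trailer_only_msg() earlier in this file).
 */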
+ +static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar, + u32 lookaheads[], + int *n_lookahead) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct ath10k_htc *htc = &ar->htc; + struct ath10k_sdio_rx_data *pkt; + struct ath10k_htc_ep *ep; + struct ath10k_skb_rxcb *cb; + enum ath10k_htc_ep_id id; + int ret, i, *n_lookahead_local; + u32 *lookaheads_local; + int lookahead_idx = 0; + + for (i = 0; i < ar_sdio->n_rx_pkts; i++) { + lookaheads_local = lookaheads; + n_lookahead_local = n_lookahead; + + id = ((struct ath10k_htc_hdr *) + &lookaheads[lookahead_idx++])->eid; + + if (id >= ATH10K_HTC_EP_COUNT) { + ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n", + id); + ret = -ENOMEM; + goto out; + } + + ep = &htc->endpoint[id]; + + if (ep->service_id == 0) { + ath10k_warn(ar, "ep %d is not connected\n", id); + ret = -ENOMEM; + goto out; + } + + pkt = &ar_sdio->rx_pkts[i]; + + if (pkt->part_of_bundle && !pkt->last_in_bundle) { + /* Only read lookaheads from RX trailers + * for the last packet in a bundle. + */ + lookahead_idx--; + lookaheads_local = NULL; + n_lookahead_local = NULL; + } + + ret = ath10k_sdio_mbox_rx_process_packet(ar, + pkt, + lookaheads_local, + n_lookahead_local); + if (ret) + goto out; + + if (!pkt->trailer_only) { + cb = ATH10K_SKB_RXCB(pkt->skb); + cb->eid = id; + + skb_queue_tail(&ar_sdio->rx_head, pkt->skb); + queue_work(ar->workqueue_aux, + &ar_sdio->async_work_rx); + } else { + kfree_skb(pkt->skb); + } + + /* The RX complete handler now owns the skb... */ + pkt->skb = NULL; + pkt->alloc_len = 0; + } + + ret = 0; + +out: + /* Free all packets that were not passed on to the RX completion + * handler... + */ + for (; i < ar_sdio->n_rx_pkts; i++) + ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); + + return ret; +} + +static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar, + struct ath10k_sdio_rx_data *rx_pkts, + struct ath10k_htc_hdr *htc_hdr, + size_t full_len, size_t act_len, + size_t *bndl_cnt) +{ + int ret, i; + u8 max_msgs = ar->htc.max_msgs_per_htc_bundle; + + *bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags); + + if (*bndl_cnt > max_msgs) { + ath10k_warn(ar, + "HTC bundle length %u exceeds maximum %u\n", + le16_to_cpu(htc_hdr->len), + max_msgs); + return -ENOMEM; + } + + /* Allocate bndl_cnt extra skbs for the bundle. + * The packet containing the + * ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included + * in bndl_cnt. The skb for that packet will be + * allocated separately. + */ + for (i = 0; i < *bndl_cnt; i++) { + ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i], + act_len, + full_len, + true, + false); + if (ret) + return ret; + } + + return 0; +}
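/* Editorial example, not part of this patch: the act_len to full_len
 * padding used below comes from ath10k_sdio_calc_txrx_padded_len().
 * Assuming the common 256-byte mbox block size (block_mask 0xff), a
 * message with act_len 300 is padded to __ALIGN_MASK(300, 0xff) == 512
 * bytes, i.e. two full SDIO blocks per fetched packet.
 */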
+ +static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar, + u32 lookaheads[], int n_lookaheads) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct ath10k_htc_hdr *htc_hdr; + size_t full_len, act_len; + bool last_in_bundle; + int ret, i; + int pkt_cnt = 0; + + if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) { + ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n", + n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS); + ret = -ENOMEM; + goto err; + } + + for (i = 0; i < n_lookaheads; i++) { + htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i]; + last_in_bundle = false; + + if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) { + ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n", + le16_to_cpu(htc_hdr->len), + ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH); + ret = -ENOMEM; + + ath10k_core_start_recovery(ar); + ath10k_warn(ar, "exceeds length, start recovery\n"); + + goto err; + } + + act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); + full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len); + + if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) { + ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n", + htc_hdr->eid, htc_hdr->flags, + le16_to_cpu(htc_hdr->len)); + ret = -EINVAL; + goto err; + } + + if (ath10k_htc_get_bundle_count( + ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) { + /* HTC header indicates that every packet to follow + * has the same padded length so that it can be + * optimally fetched as a full bundle. + */ + size_t bndl_cnt; + + ret = ath10k_sdio_mbox_alloc_bundle(ar, + &ar_sdio->rx_pkts[pkt_cnt], + htc_hdr, + full_len, + act_len, + &bndl_cnt); + + if (ret) { + ath10k_warn(ar, "failed to allocate a bundle: %d\n", + ret); + goto err; + } + + pkt_cnt += bndl_cnt; + + /* next buffer will be the last in the bundle */ + last_in_bundle = true; + } + + /* Allocate skb for packet. If the packet had the + * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled + * packet skbs have been allocated in the previous step. + */ + if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK) + full_len += ATH10K_HIF_MBOX_BLOCK_SIZE; + + ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt], + act_len, + full_len, + last_in_bundle, + last_in_bundle); + if (ret) { + ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret); + goto err; + } + + pkt_cnt++; + } + + ar_sdio->n_rx_pkts = pkt_cnt; + + return 0; + +err: + for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) { + if (!ar_sdio->rx_pkts[i].alloc_len) + break; + ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); + } + + return ret; +} + +static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0]; + struct sk_buff *skb = pkt->skb; + struct ath10k_htc_hdr *htc_hdr; + int ret; + + ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr, + skb->data, pkt->alloc_len); + if (ret) + goto err; + + htc_hdr = (struct ath10k_htc_hdr *)skb->data; + pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); + + if (pkt->act_len > pkt->alloc_len) { + ret = -EINVAL; + goto err; + } + + skb_put(skb, pkt->act_len); + return 0; + +err: + ar_sdio->n_rx_pkts = 0; + ath10k_sdio_mbox_free_rx_pkt(pkt); + + return ret; +} + +static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct ath10k_sdio_rx_data *pkt; + struct ath10k_htc_hdr *htc_hdr; + int ret, i; + u32 pkt_offset, virt_pkt_len; + + virt_pkt_len = 0; + for (i = 0; i < ar_sdio->n_rx_pkts; i++) + virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len; + + if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) { + ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len); + ret = -E2BIG; + goto err; + } + + ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr, + ar_sdio->vsg_buffer, virt_pkt_len); + if (ret) { + ath10k_warn(ar, "failed to read bundle packets: %d", ret); + goto err; + } + + pkt_offset = 0; + for (i = 0; i < ar_sdio->n_rx_pkts; i++) { + pkt = &ar_sdio->rx_pkts[i]; + htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset); + pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); + + if (pkt->act_len > pkt->alloc_len) { + ret = -EINVAL; + goto err; + } + + skb_put_data(pkt->skb, htc_hdr, pkt->act_len); + pkt_offset += pkt->alloc_len; + } + + return 0; + +err: + /* Free all packets that were not successfully fetched. */ + for (i = 0; i < ar_sdio->n_rx_pkts; i++) + ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); + + ar_sdio->n_rx_pkts = 0; + + return ret; +} + +/* This is the timeout for mailbox processing done in the sdio irq + * handler. The timeout is deliberately set quite high since SDIO dump logs + * over serial port can/will add a substantial overhead to the processing + * (if enabled). + */ +#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ) + +static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar, + u32 msg_lookahead, bool *done) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS]; + int n_lookaheads = 1; + unsigned long timeout; + int ret; + + *done = true; + + /* Copy the lookahead obtained from the HTC register table into our + * temp array as a start value.
+ */ + lookaheads[0] = msg_lookahead; + + timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ; + do { + /* Try to allocate as many HTC RX packets indicated by + * n_lookaheads. + */ + ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads, + n_lookaheads); + if (ret) + break; + + if (ar_sdio->n_rx_pkts >= 2) + /* A recv bundle was detected; force an IRQ status + * re-check. + */ + *done = false; + + if (ar_sdio->n_rx_pkts > 1) + ret = ath10k_sdio_mbox_rx_fetch_bundle(ar); + else + ret = ath10k_sdio_mbox_rx_fetch(ar); + + /* Process fetched packets. This will potentially update + * n_lookaheads depending on whether the packets contain + * lookahead reports. + */ + n_lookaheads = 0; + ret = ath10k_sdio_mbox_rx_process_packets(ar, + lookaheads, + &n_lookaheads); + + if (!n_lookaheads || ret) + break; + + /* For SYNCH processing, if we get here, we are running + * through the loop again due to updated lookaheads. Set + * the flag that we should re-check IRQ status registers + * before leaving IRQ processing; this can net better + * performance in high throughput situations. + */ + *done = false; + } while (time_before(jiffies, timeout)); + + if (ret && (ret != -ECANCELED)) + ath10k_warn(ar, "failed to get pending recv messages: %d\n", + ret); + + return ret; +} + +static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar) +{ + u32 val; + int ret; + + /* TODO: Add firmware crash handling */ + ath10k_warn(ar, "firmware crashed\n"); + + /* read counter to clear the interrupt; the debug error interrupt is + * counter 0. + */ + ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val); + if (ret) + ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret); + + return ret; +} + +static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; + u8 counter_int_status; + int ret; + + mutex_lock(&irq_data->mtx); + counter_int_status = irq_data->irq_proc_reg->counter_int_status & + irq_data->irq_en_reg->cntr_int_status_en; + + /* NOTE: other modules like GMBOX may use the counter interrupt for + * credit flow control on other counters; we only need to check for + * the debug assertion counter interrupt.
+ */ + if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK) + ret = ath10k_sdio_mbox_proc_dbg_intr(ar); + else + ret = 0; + + mutex_unlock(&irq_data->mtx); + + return ret; +} + +static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; + u8 error_int_status; + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n"); + + error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F; + if (!error_int_status) { + ath10k_warn(ar, "invalid error interrupt status: 0x%x\n", + error_int_status); + return -EIO; + } + + ath10k_dbg(ar, ATH10K_DBG_SDIO, + "sdio error_int_status 0x%x\n", error_int_status); + + if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK, + error_int_status)) + ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n"); + + if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + error_int_status)) + ath10k_warn(ar, "rx underflow interrupt error\n"); + + if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK, + error_int_status)) + ath10k_warn(ar, "tx overflow interrupt error\n"); + + /* Clear the interrupt */ + irq_data->irq_proc_reg->error_int_status &= ~error_int_status; + + /* Set the W1C value to clear the interrupt; this hits the register first */ + ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS, + error_int_status); + if (ret) { + ath10k_warn(ar, "unable to write to error int status address: %d\n", + ret); + return ret; + } + + return 0; +} + +static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; + u8 cpu_int_status; + int ret; + + mutex_lock(&irq_data->mtx); + cpu_int_status = irq_data->irq_proc_reg->cpu_int_status & + irq_data->irq_en_reg->cpu_int_status_en; + if (!cpu_int_status) { + ath10k_warn(ar, "CPU interrupt status is zero\n"); + ret = -EIO; + goto out; + } + + /* Clear the interrupt */ + irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status; + + /* Set up the register transfer buffer to hit the register 4 times; + * this is done to make the access 4-byte aligned to mitigate issues + * with host bus interconnects that restrict bus transfer lengths to + * be a multiple of 4 bytes. + * + * Set the W1C value to clear the interrupt; this hits the register first. + */ + ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS, + cpu_int_status); + if (ret) { + ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n", + ret); + goto out; + } + +out: + mutex_unlock(&irq_data->mtx); + if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK) + ath10k_sdio_fw_crashed_dump(ar); + + return ret; +} + +static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar, + u8 *host_int_status, + u32 *lookahead) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; + struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg; + struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg; + u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1); + int ret; + + mutex_lock(&irq_data->mtx); + + *lookahead = 0; + *host_int_status = 0; + + /* int_status_en is supposed to be non-zero, otherwise interrupts + * shouldn't be enabled. There is, however, a short time frame during + * initialization between the irq register and int_status_en init + * where this can happen. + * We silently ignore this condition.
+ */ + if (!irq_en_reg->int_status_en) { + ret = 0; + goto out; + } + + /* Read the first sizeof(struct ath10k_irq_proc_registers) + * bytes of the HTC register table. This + * will yield us the value of different int status + * registers and the lookahead registers. + */ + ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS, + irq_proc_reg, sizeof(*irq_proc_reg)); + if (ret) { + ath10k_core_start_recovery(ar); + ath10k_warn(ar, "read int status fail, start recovery\n"); + goto out; + } + + /* Update only those registers that are enabled */ + *host_int_status = irq_proc_reg->host_int_status & + irq_en_reg->int_status_en; + + /* Look at mbox status */ + if (!(*host_int_status & htc_mbox)) { + *lookahead = 0; + ret = 0; + goto out; + } + + /* Mask out the pending mbox value; we use the lookahead as + * the real flag for mbox processing. + */ + *host_int_status &= ~htc_mbox; + if (irq_proc_reg->rx_lookahead_valid & htc_mbox) { + *lookahead = le32_to_cpu( + irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]); + if (!*lookahead) + ath10k_warn(ar, "sdio mbox lookahead is zero\n"); + } + +out: + mutex_unlock(&irq_data->mtx); + return ret; +} + +static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar, + bool *done) +{ + u8 host_int_status; + u32 lookahead; + int ret; + + /* NOTE: the HIF implementation guarantees that the context of this + * call allows us to perform SYNCHRONOUS I/O, that is, we can block, + * sleep or call any API that can block or switch thread/task + * contexts. This is a fully schedulable context. + */ + + ret = ath10k_sdio_mbox_read_int_status(ar, + &host_int_status, + &lookahead); + if (ret) { + *done = true; + goto out; + } + + if (!host_int_status && !lookahead) { + ret = 0; + *done = true; + goto out; + } + + if (lookahead) { + ath10k_dbg(ar, ATH10K_DBG_SDIO, + "sdio pending mailbox msg lookahead 0x%08x\n", + lookahead); + + ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar, + lookahead, + done); + if (ret) + goto out; + } + + /* now, handle the rest of the interrupts */ + ath10k_dbg(ar, ATH10K_DBG_SDIO, + "sdio host_int_status 0x%x\n", host_int_status); + + if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) { + /* CPU Interrupt */ + ret = ath10k_sdio_mbox_proc_cpu_intr(ar); + if (ret) + goto out; + } + + if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) { + /* Error Interrupt */ + ret = ath10k_sdio_mbox_proc_err_intr(ar); + if (ret) + goto out; + } + + if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status)) + /* Counter Interrupt */ + ret = ath10k_sdio_mbox_proc_counter_intr(ar); + + ret = 0; + +out: + /* An optimization to bypass reading the IRQ status registers + * unnecessarily, which can re-wake the target: if upper layers + * determine that we are in a low-throughput mode, we can rely on + * taking another interrupt rather than re-checking the status + * registers, which can re-wake the target. + * + * NOTE: host interfaces that detect pending mbox messages at the + * HIF level cannot use this optimization due to possible side + * effects; SPI requires the host to drain all messages from the + * mailbox before exiting the ISR routine.
+ */ + + ath10k_dbg(ar, ATH10K_DBG_SDIO, + "sdio pending irqs done %d status %d", + *done, ret); + + return ret; +} + +static void ath10k_sdio_set_mbox_info(struct ath10k *ar) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info; + u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev; + + mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR; + mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE; + mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1; + mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR; + mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH; + + mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR; + + dev_id_base = (device & 0x0F00); + dev_id_chiprev = (device & 0x00FF); + switch (dev_id_base) { + case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00): + if (dev_id_chiprev < 4) + mbox_info->ext_info[0].htc_ext_sz = + ATH10K_HIF_MBOX0_EXT_WIDTH; + else + /* from QCA6174 2.0(0x504), the width has been extended + * to 56K + */ + mbox_info->ext_info[0].htc_ext_sz = + ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0; + break; + case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00): + mbox_info->ext_info[0].htc_ext_sz = + ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0; + break; + default: + mbox_info->ext_info[0].htc_ext_sz = + ATH10K_HIF_MBOX0_EXT_WIDTH; + } + + mbox_info->ext_info[1].htc_ext_addr = + mbox_info->ext_info[0].htc_ext_addr + + mbox_info->ext_info[0].htc_ext_sz + + ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE; + mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH; +}
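/* Editorial example, not part of this patch: for the QCA6174 2.0 device id
 * 0x0504 mentioned in the comment above, dev_id_base is 0x0500 and
 * dev_id_chiprev is 0x04, so the AR6005 case is taken and, since the chip
 * revision is not below 4, the wider ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0
 * window is selected.
 */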
+static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
+{
+	unsigned long timeout;
+	u32 rx_word;
+	int ret;
+
+	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+	rx_word = 0;
+
+	while (time_before(jiffies, timeout) && !rx_word) {
+		ret = ath10k_sdio_read32(ar,
+					 MBOX_HOST_INT_STATUS_ADDRESS,
+					 &rx_word);
+		if (ret) {
+			ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
+			return ret;
+		}
+
+		/* all we really want is one bit */
+		rx_word &= 1;
+	}
+
+	if (!rx_word) {
+		ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
+					void *req, u32 req_len,
+					void *resp, u32 *resp_len)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	u32 addr;
+	int ret;
+
+	if (req) {
+		ret = ath10k_sdio_bmi_credits(ar);
+		if (ret)
+			return ret;
+
+		addr = ar_sdio->mbox_info.htc_addr;
+
+		memcpy(ar_sdio->bmi_buf, req, req_len);
+		ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
+		if (ret) {
+			ath10k_warn(ar,
+				    "unable to send the bmi data to the device: %d\n",
+				    ret);
+			return ret;
+		}
+	}
+
+	if (!resp || !resp_len)
+		/* No response expected */
+		return 0;
+
+	/* During normal bootup, small reads may be required.
+	 * Rather than issue an HIF Read and then wait as the Target
+	 * adds successive bytes to the FIFO, we wait here until
+	 * we know that response data is available.
+	 *
+	 * This allows us to cleanly timeout on an unexpected
+	 * Target failure rather than risk problems at the HIF level.
+	 * In particular, this avoids SDIO timeouts and possibly garbage
+	 * data on some host controllers. And on an interconnect
+	 * such as Compact Flash (as well as some SDIO masters) which
+	 * does not provide any indication on data timeout, it avoids
+	 * a potential hang or garbage response.
+	 *
+	 * Synchronization is more difficult for reads larger than the
+	 * size of the MBOX FIFO (128B), because the Target is unable
+	 * to push the 129th byte of data until AFTER the Host posts an
+	 * HIF Read and removes some FIFO data. So for large reads the
+	 * Host proceeds to post an HIF Read BEFORE all the data is
+	 * actually available to read. Fortunately, large BMI reads do
+	 * not occur in practice -- they're supported for debug/development.
+	 *
+	 * So Host/Target BMI synchronization is divided into these cases:
+	 * CASE 1: length < 4
+	 *	Should not happen
+	 *
+	 * CASE 2: 4 <= length <= 128
+	 *	Wait for first 4 bytes to be in FIFO
+	 *	If CONSERVATIVE_BMI_READ is enabled, also wait for
+	 *	a BMI command credit, which indicates that the ENTIRE
+	 *	response is available in the FIFO
+	 *
+	 * CASE 3: length > 128
+	 *	Wait for the first 4 bytes to be in FIFO
+	 *
+	 * For most uses, a small timeout should be sufficient and we will
+	 * usually see a response quickly; but there may be some unusual
+	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+	 * For now, we use an unbounded busy loop while waiting for
+	 * BMI_EXECUTE.
+	 *
+	 * If BMI_EXECUTE ever needs to support longer-latency execution,
+	 * especially in production, this code needs to be enhanced to sleep
+	 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+	 * a function of Host processor speed.
+	 */
+	ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
+	if (ret)
+		return ret;
+
+	/* We always read from the start of the mbox address */
+	addr = ar_sdio->mbox_info.htc_addr;
+	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
+	if (ret) {
+		ath10k_warn(ar,
+			    "unable to read the bmi data from the device: %d\n",
+			    ret);
+		return ret;
+	}
+
+	memcpy(resp, ar_sdio->bmi_buf, *resp_len);
+
+	return 0;
+}
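For context, callers such as the BMI layer drive ath10k_sdio_bmi_exchange_msg() with a small request struct and an optional response buffer. The in-driver sketch below shows the shape of such a call (hypothetical helper and struct; the real command ids and marshalling live in bmi.c/bmi.h):

/* Illustrative sketch, not part of the patch: a register-read style
 * BMI command driven through ath10k_sdio_bmi_exchange_msg(). The id
 * value and struct layout are placeholders; see bmi.h for the real ones.
 */
struct sketch_bmi_cmd {
	__le32 id;	/* hypothetical command id */
	__le32 addr;
} __packed;

static int sketch_bmi_read_reg(struct ath10k *ar, u32 addr, u32 *val)
{
	struct sketch_bmi_cmd cmd = {
		.id = __cpu_to_le32(6),	/* placeholder id */
		.addr = __cpu_to_le32(addr),
	};
	__le32 resp;
	u32 resp_len = sizeof(resp);
	int ret;

	/* Waits for a command credit, writes the request to the mbox,
	 * then polls the lookahead before reading the response back.
	 */
	ret = ath10k_sdio_bmi_exchange_msg(ar, &cmd, sizeof(cmd),
					   &resp, &resp_len);
	if (ret)
		return ret;

	*val = __le32_to_cpu(resp);
	return 0;
}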
+/* sdio async handling functions */
+
+static struct ath10k_sdio_bus_request
+*ath10k_sdio_alloc_busreq(struct ath10k *ar)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	struct ath10k_sdio_bus_request *bus_req;
+
+	spin_lock_bh(&ar_sdio->lock);
+
+	if (list_empty(&ar_sdio->bus_req_freeq)) {
+		bus_req = NULL;
+		goto out;
+	}
+
+	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
+				   struct ath10k_sdio_bus_request, list);
+	list_del(&bus_req->list);
+
+out:
+	spin_unlock_bh(&ar_sdio->lock);
+	return bus_req;
+}
+
+static void ath10k_sdio_free_bus_req(struct ath10k *ar,
+				     struct ath10k_sdio_bus_request *bus_req)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+
+	memset(bus_req, 0, sizeof(*bus_req));
+
+	spin_lock_bh(&ar_sdio->lock);
+	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
+	spin_unlock_bh(&ar_sdio->lock);
+}
+
+static void __ath10k_sdio_write_async(struct ath10k *ar,
+				      struct ath10k_sdio_bus_request *req)
+{
+	struct ath10k_htc_ep *ep;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = req->skb;
+	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
+	if (ret)
+		ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d\n",
+			    req->address, ret);
+
+	if (req->htc_msg) {
+		ep = &ar->htc.endpoint[req->eid];
+		ath10k_htc_notify_tx_completion(ep, skb);
+	} else if (req->comp) {
+		complete(req->comp);
+	}
+
+	ath10k_sdio_free_bus_req(ar, req);
+}
+
+/* To improve throughput, use a workqueue to deliver packets to the HTC
+ * layer; this way the SDIO bus is utilised much better.
+ */
+static void ath10k_rx_indication_async_work(struct work_struct *work)
+{
+	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
+						   async_work_rx);
+	struct ath10k *ar = ar_sdio->ar;
+	struct ath10k_htc_ep *ep;
+	struct ath10k_skb_rxcb *cb;
+	struct sk_buff *skb;
+
+	while (true) {
+		skb = skb_dequeue(&ar_sdio->rx_head);
+		if (!skb)
+			break;
+		cb = ATH10K_SKB_RXCB(skb);
+		ep = &ar->htc.endpoint[cb->eid];
+		ep->ep_ops.ep_rx_complete(ar, skb);
+	}
+
+	if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
+		local_bh_disable();
+		napi_schedule(&ar->napi);
+		local_bh_enable();
+	}
+}
+
+static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
+{
+	struct ath10k *ar = ar_sdio->ar;
+	unsigned char rtc_state = 0;
+	int ret = 0;
+
+	rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
+	if (ret) {
+		ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
+		return ret;
+	}
+
+	*state = rtc_state & 0x3;
+
+	return ret;
+}
+
+static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	u32 val;
+	int retry = ATH10K_CIS_READ_RETRY, ret = 0;
+	unsigned char rtc_state = 0;
+
+	sdio_claim_host(ar_sdio->func);
+
+	ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
+	if (ret) {
+		ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
+			    ret);
+		goto release;
+	}
+
+	if (enable_sleep) {
+		val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
+		ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
+	} else {
+		val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
+		ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
+	}
+
+	ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
+	if (ret) {
+		ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d\n",
+			    ret);
+	}
+
+	if (!enable_sleep) {
+		do {
+			udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
+			ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);
+
+			if (ret) {
+				ath10k_warn(ar, "failed to disable mbox sleep: %d\n", ret);
+				break;
+			}
+
+			ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
+				   rtc_state);
+
+			if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
+				break;
+
+			udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
+			retry--;
+		} while (retry > 0);
+	}
+
+release:
+	sdio_release_host(ar_sdio->func);
+
+	return ret;
+}
+
+static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
+{
+	struct ath10k_sdio *ar_sdio = timer_container_of(ar_sdio, t,
+							 sleep_timer);
+
+	ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
+	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+}
+
+static void ath10k_sdio_write_async_work(struct work_struct *work)
+{
+	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
+						   wr_async_work);
+	struct ath10k *ar = ar_sdio->ar;
+	struct ath10k_sdio_bus_request *req, *tmp_req;
+	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
+
+	spin_lock_bh(&ar_sdio->wr_async_lock);
+
+	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+		list_del(&req->list);
+		spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+		if (req->address >= mbox_info->htc_addr &&
+		    ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
+			ath10k_sdio_set_mbox_sleep(ar, false);
+			mod_timer(&ar_sdio->sleep_timer, jiffies +
+				  msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
+		}
+
+		__ath10k_sdio_write_async(ar, req);
+		spin_lock_bh(&ar_sdio->wr_async_lock);
+	}
+
+	spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+	if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
+		ath10k_sdio_set_mbox_sleep(ar, true);
+}
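Taken together, the sleep handling above forms a small state machine: a mailbox write while asleep forces the chip awake and arms a 50 ms inactivity timer; when the timer fires, the worker is queued once more just to put the mailbox back to sleep. The outline below is an illustrative helper (hypothetical name, not part of the patch) summarizing those transitions:

/* Illustrative sketch of the mbox power state machine driven by
 * ath10k_sdio_write_async_work() and the sleep timer above.
 */
static void sketch_mbox_power_flow(struct ath10k_sdio *ar_sdio)
{
	switch (ar_sdio->mbox_state) {
	case SDIO_MBOX_SLEEP_STATE:
		/* First mbox write after idling: disable sleep, poll the
		 * RTC state over CIS function 0 until it reports ON, then
		 * arm the ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS timer.
		 */
		break;
	case SDIO_MBOX_AWAKE_STATE:
		/* Traffic flows; the timer keeps running untouched. */
		break;
	case SDIO_MBOX_REQUEST_TO_SLEEP_STATE:
		/* Timer fired: the worker runs once more just to call
		 * ath10k_sdio_set_mbox_sleep(ar, true).
		 */
		break;
	default:
		break;
	}
}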
+static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
+				       struct sk_buff *skb,
+				       struct completion *comp,
+				       bool htc_msg, enum ath10k_htc_ep_id eid)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	struct ath10k_sdio_bus_request *bus_req;
+
+	/* Allocate a bus request for the message and queue it on the
+	 * SDIO workqueue.
+	 */
+	bus_req = ath10k_sdio_alloc_busreq(ar);
+	if (!bus_req) {
+		ath10k_warn(ar,
+			    "unable to allocate bus request for async request\n");
+		return -ENOMEM;
+	}
+
+	bus_req->skb = skb;
+	bus_req->eid = eid;
+	bus_req->address = addr;
+	bus_req->htc_msg = htc_msg;
+	bus_req->comp = comp;
+
+	spin_lock_bh(&ar_sdio->wr_async_lock);
+	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
+	spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+	return 0;
+}
+
+/* IRQ handler */
+
+static void ath10k_sdio_irq_handler(struct sdio_func *func)
+{
+	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+	struct ath10k *ar = ar_sdio->ar;
+	unsigned long timeout;
+	bool done = false;
+	int ret;
+
+	/* Release the host during interrupts so we can pick it back up when
+	 * we process commands.
+	 */
+	sdio_release_host(ar_sdio->func);
+
+	timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
+	do {
+		ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
+		if (ret)
+			break;
+	} while (time_before(jiffies, timeout) && !done);
+
+	ath10k_mac_tx_push_pending(ar);
+
+	sdio_claim_host(ar_sdio->func);
+
+	if (ret && ret != -ECANCELED)
+		ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
+			    ret);
+}
+
+/* sdio HIF functions */
+
+static int ath10k_sdio_disable_intrs(struct ath10k *ar)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+	int ret;
+
+	mutex_lock(&irq_data->mtx);
+
+	memset(regs, 0, sizeof(*regs));
+	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+				&regs->int_status_en, sizeof(*regs));
+	if (ret)
+		ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
+
+	mutex_unlock(&irq_data->mtx);
+
+	return ret;
+}
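The bus-request machinery above supports both fire-and-forget HTC writes and synchronous control writes. The sketch below (hypothetical helper, not part of the patch) shows the synchronous pattern, mirroring how the IRQ-disable path later in this file pairs ath10k_sdio_prep_async_req() with a completion; the caller retains ownership of the skb:

/* Illustrative sketch: queue a raw asynchronous write and wait for the
 * worker to complete it.
 */
static int sketch_async_write_sync(struct ath10k *ar, u32 addr,
				   struct sk_buff *skb)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct completion done;
	int ret;

	init_completion(&done);

	/* not an HTC message, so eid is ignored and 'done' is signalled */
	ret = ath10k_sdio_prep_async_req(ar, addr, skb, &done, false, 0);
	if (ret)
		return ret;

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	if (!wait_for_completion_timeout(&done, 3 * HZ))
		return -ETIMEDOUT;

	return 0;
}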
+static int ath10k_sdio_hif_power_up(struct ath10k *ar,
+				    enum ath10k_firmware_mode fw_mode)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	struct sdio_func *func = ar_sdio->func;
+	int ret;
+
+	if (!ar_sdio->is_disabled)
+		return 0;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
+
+	ret = ath10k_sdio_config(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to config sdio: %d\n", ret);
+		return ret;
+	}
+
+	sdio_claim_host(func);
+
+	ret = sdio_enable_func(func);
+	if (ret) {
+		ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
+		sdio_release_host(func);
+		return ret;
+	}
+
+	sdio_release_host(func);
+
+	/* Wait for hardware to initialise. It should take a lot less than
+	 * 20 ms but let's be conservative here.
+	 */
+	msleep(20);
+
+	ar_sdio->is_disabled = false;
+
+	ret = ath10k_sdio_disable_intrs(ar);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void ath10k_sdio_hif_power_down(struct ath10k *ar)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	int ret;
+
+	if (ar_sdio->is_disabled)
+		return;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
+
+	timer_delete_sync(&ar_sdio->sleep_timer);
+	ath10k_sdio_set_mbox_sleep(ar, true);
+
+	/* Disable the card */
+	sdio_claim_host(ar_sdio->func);
+
+	ret = sdio_disable_func(ar_sdio->func);
+	if (ret) {
+		ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
+		sdio_release_host(ar_sdio->func);
+		return;
+	}
+
+	ret = mmc_hw_reset(ar_sdio->func->card);
+	if (ret)
+		ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
+
+	sdio_release_host(ar_sdio->func);
+
+	ar_sdio->is_disabled = true;
+}
+
+static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+				 struct ath10k_hif_sg_item *items, int n_items)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	enum ath10k_htc_ep_id eid;
+	struct sk_buff *skb;
+	int ret, i;
+
+	eid = pipe_id_to_eid(pipe_id);
+
+	for (i = 0; i < n_items; i++) {
+		size_t padded_len;
+		u32 address;
+
+		skb = items[i].transfer_context;
+		padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
+							      skb->len);
+		skb_trim(skb, padded_len);
+
+		/* Write TX data to the end of the mbox address space */
+		address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
+			  skb->len;
+		ret = ath10k_sdio_prep_async_req(ar, address, skb,
+						 NULL, true, eid);
+		if (ret)
+			return ret;
+	}
+
+	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+
+	return 0;
+}
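The address computation in ath10k_sdio_hif_tx_sg() places each frame so that its last byte lands on the final address of the endpoint's mailbox window; a write that ends on the window's last address is what signals end-of-message to the mailbox. A worked example (standalone, illustrative; assumes the 56 KiB extended mbox0 at 0x5000):

#include <stdio.h>

int main(void)
{
	unsigned int mbox_addr = 0x5000;
	unsigned int mbox_size = 56 * 1024;	/* 0xe000 */
	unsigned int padded_len = 0x200;	/* a 512-byte padded frame */
	unsigned int address = mbox_addr + mbox_size - padded_len;

	/* 0x5000 + 0xe000 - 0x200 = 0x12e00; window ends at 0x12fff */
	printf("write %u bytes at 0x%x (window ends at 0x%x)\n",
	       padded_len, address, mbox_addr + mbox_size - 1);
	return 0;
}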
+static int ath10k_sdio_enable_intrs(struct ath10k *ar)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+	int ret;
+
+	mutex_lock(&irq_data->mtx);
+
+	/* Enable the error, CPU and counter interrupt status sources */
+	regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
+			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
+			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
+
+	/* NOTE: there are cases where the HIF layer could detect pending
+	 * mbox messages itself; that detection is disabled for now.
+	 */
+	regs->int_status_en |=
+		FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
+
+	/* Set up the CPU Interrupt Status Register; enable CPU sourced
+	 * interrupt #0, which is used to report assertions from the target.
+	 */
+	regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);
+
+	/* Set up the Error Interrupt status Register */
+	regs->err_int_status_en =
+		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
+		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
+
+	/* Enable the Counter interrupt status register to get fatal errors
+	 * for debugging.
+	 */
+	regs->cntr_int_status_en =
+		FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
+			   ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
+
+	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+				&regs->int_status_en, sizeof(*regs));
+	if (ret)
+		ath10k_warn(ar,
+			    "failed to update mbox interrupt status register: %d\n",
+			    ret);
+
+	mutex_unlock(&irq_data->mtx);
+	return ret;
+}
+
+/* HIF diagnostics */
+
+static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+				     size_t buf_len)
+{
+	int ret;
+	void *mem;
+
+	mem = kzalloc(buf_len, GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	/* set window register to start read cycle */
+	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
+	if (ret) {
+		ath10k_warn(ar, "failed to set mbox window read address: %d\n", ret);
+		goto out;
+	}
+
+	/* read the data */
+	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
+	if (ret) {
+		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
+			    ret);
+		goto out;
+	}
+
+	memcpy(buf, mem, buf_len);
+
+out:
+	kfree(mem);
+
+	return ret;
+}
+
+static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
+				   u32 *value)
+{
+	__le32 *val;
+	int ret;
+
+	val = kzalloc(sizeof(*val), GFP_KERNEL);
+	if (!val)
+		return -ENOMEM;
+
+	ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
+	if (ret)
+		goto out;
+
+	*value = __le32_to_cpu(*val);
+
+out:
+	kfree(val);
+
+	return ret;
+}
+
+static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
+					  const void *data, int nbytes)
+{
+	int ret;
+
+	/* set write data */
+	ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
+	if (ret) {
+		ath10k_warn(ar,
+			    "failed to write 0x%p to mbox window data address: %d\n",
+			    data, ret);
+		return ret;
+	}
+
+	/* set window register, which starts the write cycle */
+	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
+	if (ret) {
+		ath10k_warn(ar, "failed to set mbox window write address: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_sdio_hif_start_post(struct ath10k *ar)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	u32 addr, val;
+	int ret = 0;
+
+	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+	ret = ath10k_sdio_diag_read32(ar, addr, &val);
+	if (ret) {
+		ath10k_warn(ar, "unable to read hi_acs_flags: %d\n", ret);
+		return ret;
+	}
+
+	if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
+		ath10k_dbg(ar, ATH10K_DBG_SDIO,
+			   "sdio mailbox swap service enabled\n");
+		ar_sdio->swap_mbox = true;
+	} else {
+		ath10k_dbg(ar, ATH10K_DBG_SDIO,
+			   "sdio mailbox swap service disabled\n");
+		ar_sdio->swap_mbox = false;
+	}
+
+	ath10k_sdio_set_mbox_sleep(ar, true);
+
+	return 0;
+}
+
+static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
+{
+	u32 addr, val;
+	int ret;
+
+	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+	ret = ath10k_sdio_diag_read32(ar, addr, &val);
+	if (ret) {
+		ath10k_warn(ar,
+			    "unable to read hi_acs_flags for htt tx complete: %d\n", ret);
+		return ret;
+	}
+
+	ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);
+
+	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
+		   ret ? " " : " not ");
+
+	return ret;
+}
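The diagnostic window above works the same way in both directions: the data register is the payload staging area, and a write to the window address register kicks off the bus cycle (address last for writes, address first for reads). A write-side usage sketch, mirroring how the reads combine the window with host_interest_item_address() (hypothetical helper, not part of the patch):

/* Illustrative sketch: set a host-interest word through the
 * diagnostic window.
 */
static int sketch_diag_write_hi_word(struct ath10k *ar, u32 item_offset,
				     u32 value)
{
	__le32 val = __cpu_to_le32(value);
	u32 addr = host_interest_item_address(item_offset);

	/* data register first; the window address register starts the cycle */
	return ath10k_sdio_hif_diag_write_mem(ar, addr, &val, sizeof(val));
}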
+/* HIF start/stop */
+
+static int ath10k_sdio_hif_start(struct ath10k *ar)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	int ret;
+
+	ath10k_core_napi_enable(ar);
+
+	/* Sleep 20 ms before HIF interrupts are disabled.
+	 * This will give the target plenty of time to process the BMI done
+	 * request before interrupts are disabled.
+	 */
+	msleep(20);
+
+	ret = ath10k_sdio_disable_intrs(ar);
+	if (ret)
+		return ret;
+
+	/* eid 0 always uses the lower part of the extended mailbox address
+	 * space (ext_info[0].htc_ext_addr).
+	 */
+	ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
+	ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
+
+	sdio_claim_host(ar_sdio->func);
+
+	/* Register the isr */
+	ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
+	if (ret) {
+		ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
+		sdio_release_host(ar_sdio->func);
+		return ret;
+	}
+
+	sdio_release_host(ar_sdio->func);
+
+	ret = ath10k_sdio_enable_intrs(ar);
+	if (ret)
+		ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
+
+	/* Enable sleep and then disable it again */
+	ret = ath10k_sdio_set_mbox_sleep(ar, true);
+	if (ret)
+		return ret;
+
+	/* Wait for 20 ms for the written value to take effect */
+	msleep(20);
+
+	ret = ath10k_sdio_set_mbox_sleep(ar, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
+
+static void ath10k_sdio_irq_disable(struct ath10k *ar)
+{
+	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+	struct sk_buff *skb;
+	struct completion irqs_disabled_comp;
+	int ret;
+
+	skb = dev_alloc_skb(sizeof(*regs));
+	if (!skb)
+		return;
+
+	mutex_lock(&irq_data->mtx);
+
+	memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
+	memcpy(skb->data, regs, sizeof(*regs));
+	skb_put(skb, sizeof(*regs));
+
+	mutex_unlock(&irq_data->mtx);
+
+	init_completion(&irqs_disabled_comp);
+	ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+					 skb, &irqs_disabled_comp, false, 0);
+	if (ret)
+		goto out;
+
+	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+
+	/* Wait for the completion of the IRQ disable request.
+	 * If there is a timeout we will try to disable irqs anyway.
+ */ + ret = wait_for_completion_timeout(&irqs_disabled_comp, + SDIO_IRQ_DISABLE_TIMEOUT_HZ); + if (!ret) + ath10k_warn(ar, "sdio irq disable request timed out\n"); + + sdio_claim_host(ar_sdio->func); + + ret = sdio_release_irq(ar_sdio->func); + if (ret) + ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret); + + sdio_release_host(ar_sdio->func); + +out: + kfree_skb(skb); +} + +static void ath10k_sdio_hif_stop(struct ath10k *ar) +{ + struct ath10k_sdio_bus_request *req, *tmp_req; + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct sk_buff *skb; + + ath10k_sdio_irq_disable(ar); + + cancel_work_sync(&ar_sdio->async_work_rx); + + while ((skb = skb_dequeue(&ar_sdio->rx_head))) + dev_kfree_skb_any(skb); + + cancel_work_sync(&ar_sdio->wr_async_work); + + spin_lock_bh(&ar_sdio->wr_async_lock); + + /* Free all bus requests that have not been handled */ + list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { + struct ath10k_htc_ep *ep; + + list_del(&req->list); + + if (req->htc_msg) { + ep = &ar->htc.endpoint[req->eid]; + ath10k_htc_notify_tx_completion(ep, req->skb); + } else if (req->skb) { + kfree_skb(req->skb); + } + ath10k_sdio_free_bus_req(ar, req); + } + + spin_unlock_bh(&ar_sdio->wr_async_lock); + + ath10k_core_napi_sync_disable(ar); +} + +#ifdef CONFIG_PM + +static int ath10k_sdio_hif_suspend(struct ath10k *ar) +{ + return 0; +} + +static int ath10k_sdio_hif_resume(struct ath10k *ar) +{ + switch (ar->state) { + case ATH10K_STATE_OFF: + ath10k_dbg(ar, ATH10K_DBG_SDIO, + "sdio resume configuring sdio\n"); + + /* need to set sdio settings after power is cut from sdio */ + ath10k_sdio_config(ar); + break; + + case ATH10K_STATE_ON: + default: + break; + } + + return 0; +} +#endif + +static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar, + u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct ath10k_htc *htc = &ar->htc; + u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size; + enum ath10k_htc_ep_id eid; + bool ep_found = false; + int i; + + /* For sdio, we are interested in the mapping between eid + * and pipeid rather than service_id to pipe_id. + * First we find out which eid has been allocated to the + * service... + */ + for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) { + if (htc->endpoint[i].service_id == service_id) { + eid = htc->endpoint[i].eid; + ep_found = true; + break; + } + } + + if (!ep_found) + return -EINVAL; + + /* Then we create the simplest mapping possible between pipeid + * and eid + */ + *ul_pipe = *dl_pipe = (u8)eid; + + /* Normally, HTT will use the upper part of the extended + * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl + * the lower part (ext_info[0].htc_ext_addr). + * If fw wants swapping of mailbox addresses, the opposite is true. 
+ */ + if (ar_sdio->swap_mbox) { + htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; + wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr; + htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; + wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz; + } else { + htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr; + wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; + htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz; + wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; + } + + switch (service_id) { + case ATH10K_HTC_SVC_ID_RSVD_CTRL: + /* HTC ctrl ep mbox address has already been setup in + * ath10k_sdio_hif_start + */ + break; + case ATH10K_HTC_SVC_ID_WMI_CONTROL: + ar_sdio->mbox_addr[eid] = wmi_addr; + ar_sdio->mbox_size[eid] = wmi_mbox_size; + ath10k_dbg(ar, ATH10K_DBG_SDIO, + "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n", + ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]); + break; + case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: + ar_sdio->mbox_addr[eid] = htt_addr; + ar_sdio->mbox_size[eid] = htt_mbox_size; + ath10k_dbg(ar, ATH10K_DBG_SDIO, + "sdio htt data mbox_addr 0x%x mbox_size %d\n", + ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]); + break; + default: + ath10k_warn(ar, "unsupported HTC service id: %d\n", + service_id); + return -EINVAL; + } + + return 0; +} + +static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n"); + + /* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our + * case) == 0 + */ + *ul_pipe = 0; + *dl_pipe = 0; +} + +static const struct ath10k_hif_ops ath10k_sdio_hif_ops = { + .tx_sg = ath10k_sdio_hif_tx_sg, + .diag_read = ath10k_sdio_hif_diag_read, + .diag_write = ath10k_sdio_hif_diag_write_mem, + .exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg, + .start = ath10k_sdio_hif_start, + .stop = ath10k_sdio_hif_stop, + .start_post = ath10k_sdio_hif_start_post, + .get_htt_tx_complete = ath10k_sdio_get_htt_tx_complete, + .map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe, + .get_default_pipe = ath10k_sdio_hif_get_default_pipe, + .power_up = ath10k_sdio_hif_power_up, + .power_down = ath10k_sdio_hif_power_down, +#ifdef CONFIG_PM + .suspend = ath10k_sdio_hif_suspend, + .resume = ath10k_sdio_hif_resume, +#endif +}; + +#ifdef CONFIG_PM_SLEEP + +/* Empty handlers so that mmc subsystem doesn't remove us entirely during + * suspend. We instead follow cfg80211 suspend/resume handlers. 
+ */
+static int ath10k_sdio_pm_suspend(struct device *device)
+{
+	struct sdio_func *func = dev_to_sdio_func(device);
+	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+	struct ath10k *ar = ar_sdio->ar;
+	mmc_pm_flag_t pm_flag, pm_caps;
+	int ret;
+
+	if (!device_may_wakeup(ar->dev))
+		return 0;
+
+	ath10k_sdio_set_mbox_sleep(ar, true);
+
+	pm_flag = MMC_PM_KEEP_POWER;
+
+	ret = sdio_set_host_pm_flags(func, pm_flag);
+	if (ret) {
+		pm_caps = sdio_get_host_pm_caps(func);
+		ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
+			    pm_flag, pm_caps, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int ath10k_sdio_pm_resume(struct device *device)
+{
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
+			 ath10k_sdio_pm_resume);
+
+#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
+
+#else
+
+#define ATH10K_SDIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
+{
+	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+	int done;
+
+	done = ath10k_htt_rx_hl_indication(ar, budget);
+	ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget: %d\n", done, budget);
+
+	if (done < budget)
+		napi_complete_done(ctx, done);
+
+	return done;
+}
+
+static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
+						u32 item_offset,
+						u32 *val)
+{
+	u32 addr;
+	int ret;
+
+	addr = host_interest_item_address(item_offset);
+
+	ret = ath10k_sdio_diag_read32(ar, addr, val);
+
+	if (ret)
+		ath10k_warn(ar, "unable to read host interest offset %d value\n",
+			    item_offset);
+
+	return ret;
+}
+
+static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
+				u32 buf_len)
+{
+	u32 val;
+	int i, ret;
+
+	for (i = 0; i < buf_len; i += 4) {
+		ret = ath10k_sdio_diag_read32(ar, address + i, &val);
+		if (ret) {
+			ath10k_warn(ar, "unable to read mem %d value\n", address + i);
+			break;
+		}
+		memcpy(buf + i, &val, 4);
+	}
+
+	return ret;
+}
+
+static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
+{
+	u32 param;
+
+	ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), &param);
+
+	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);
+
+	return !!(param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW);
+}
+
+static void ath10k_sdio_dump_registers(struct ath10k *ar,
+				       struct ath10k_fw_crash_data *crash_data,
+				       bool fast_dump)
+{
+	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+	int i, ret;
+	u32 reg_dump_area;
+
+	ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),
+						   &reg_dump_area);
+	if (ret) {
+		ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);
+		return;
+	}
+
+	if (fast_dump)
+		ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,
+					     sizeof(reg_dump_values));
+	else
+		ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,
+					   sizeof(reg_dump_values));
+
+	if (ret) {
+		ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);
+		return;
+	}
+
+	ath10k_err(ar, "firmware register dump:\n");
+	for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)
+		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+			   i,
+			   reg_dump_values[i],
+			   reg_dump_values[i + 1],
+			   reg_dump_values[i + 2],
+			   reg_dump_values[i + 3]);
+
+	if (!crash_data)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)
+		crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
+}
+static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
+					   const struct ath10k_mem_region *mem_region,
+					   u8 *buf, size_t buf_len)
+{
+	const struct ath10k_mem_section *cur_section, *next_section;
+	unsigned int count, section_size, skip_size;
+	int ret, i, j;
+
+	if (!mem_region || !buf)
+		return 0;
+
+	cur_section = &mem_region->section_table.sections[0];
+
+	if (mem_region->start > cur_section->start) {
+		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+			    mem_region->start, cur_section->start);
+		return 0;
+	}
+
+	skip_size = cur_section->start - mem_region->start;
+
+	/* fill the gap between the region start address and the first
+	 * section
+	 */
+	for (i = 0; i < skip_size; i++) {
+		*buf = ATH10K_MAGIC_NOT_COPIED;
+		buf++;
+	}
+
+	count = 0;
+	i = 0;
+	for (; cur_section; cur_section = next_section) {
+		section_size = cur_section->end - cur_section->start;
+
+		if (section_size <= 0) {
+			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+				    cur_section->start,
+				    cur_section->end);
+			break;
+		}
+
+		if (++i == mem_region->section_table.size) {
+			/* last section */
+			next_section = NULL;
+			skip_size = 0;
+		} else {
+			next_section = cur_section + 1;
+
+			if (cur_section->end > next_section->start) {
+				ath10k_warn(ar, "next ramdump section 0x%x starts below current end address 0x%x\n",
+					    next_section->start,
+					    cur_section->end);
+				break;
+			}
+
+			skip_size = next_section->start - cur_section->end;
+		}
+
+		if (buf_len < (skip_size + section_size)) {
+			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+			break;
+		}
+
+		buf_len -= skip_size + section_size;
+
+		/* read section to dest memory */
+		ret = ath10k_sdio_read_mem(ar, cur_section->start,
+					   buf, section_size);
+		if (ret) {
+			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+				    cur_section->start, ret);
+			break;
+		}
+
+		buf += section_size;
+		count += section_size;
+
+		/* fill in the gap between this section and the next */
+		for (j = 0; j < skip_size; j++) {
+			*buf = ATH10K_MAGIC_NOT_COPIED;
+			buf++;
+		}
+
+		count += skip_size;
+	}
+
+	return count;
+}
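A worked example makes the gap handling above concrete: any byte of the region not covered by a section is stamped with ATH10K_MAGIC_NOT_COPIED instead of being read from the target, so the dump keeps the region's full address layout. Standalone, with hypothetical section addresses (not from any real memory map):

#include <stdio.h>

int main(void)
{
	unsigned int region_start = 0x400000;
	unsigned int s0_start = 0x400100, s0_end = 0x400300;
	unsigned int s1_start = 0x400400, s1_end = 0x400500;

	printf("lead gap : %u bytes (magic)\n", s0_start - region_start); /* 256 */
	printf("section 0: %u bytes (read)\n", s0_end - s0_start);        /* 512 */
	printf("mid gap  : %u bytes (magic)\n", s1_start - s0_end);       /* 256 */
	printf("section 1: %u bytes (read)\n", s1_end - s1_start);        /* 256 */
	return 0;
}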
+/* if an error happened returns < 0, otherwise the length */
+static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
+					   const struct ath10k_mem_region *current_region,
+					   u8 *buf,
+					   bool fast_dump)
+{
+	int ret;
+
+	if (current_region->section_table.size > 0)
+		/* Copy each section individually. */
+		return ath10k_sdio_dump_memory_section(ar,
+						       current_region,
+						       buf,
+						       current_region->len);
+
+	/* No individual memory sections defined, so we can
+	 * copy the entire memory region.
+	 */
+	if (fast_dump)
+		ret = ath10k_bmi_read_memory(ar,
+					     current_region->start,
+					     buf,
+					     current_region->len);
+	else
+		ret = ath10k_sdio_read_mem(ar,
+					   current_region->start,
+					   buf,
+					   current_region->len);
+
+	if (ret) {
+		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+			    current_region->name, ret);
+		return ret;
+	}
+
+	return current_region->len;
+}
+
+static void ath10k_sdio_dump_memory(struct ath10k *ar,
+				    struct ath10k_fw_crash_data *crash_data,
+				    bool fast_dump)
+{
+	const struct ath10k_hw_mem_layout *mem_layout;
+	const struct ath10k_mem_region *current_region;
+	struct ath10k_dump_ram_data_hdr *hdr;
+	u32 count;
+	size_t buf_len;
+	int ret, i;
+	u8 *buf;
+
+	if (!crash_data)
+		return;
+
+	mem_layout = ath10k_coredump_get_mem_layout(ar);
+	if (!mem_layout)
+		return;
+
+	current_region = &mem_layout->region_table.regions[0];
+
+	buf = crash_data->ramdump_buf;
+	buf_len = crash_data->ramdump_buf_len;
+
+	memset(buf, 0, buf_len);
+
+	for (i = 0; i < mem_layout->region_table.size; i++) {
+		count = 0;
+
+		if (current_region->len > buf_len) {
+			ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
+				    current_region->name,
+				    current_region->len,
+				    buf_len);
+			break;
+		}
+
+		/* Reserve space for the header. */
+		hdr = (void *)buf;
+		buf += sizeof(*hdr);
+		buf_len -= sizeof(*hdr);
+
+		ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
+						      fast_dump);
+		if (ret >= 0)
+			count = ret;
+
+		hdr->region_type = cpu_to_le32(current_region->type);
+		hdr->start = cpu_to_le32(current_region->start);
+		hdr->length = cpu_to_le32(count);
+
+		if (count == 0)
+			/* Note: the header remains, just with zero length. */
+			break;
+
+		buf += count;
+		buf_len -= count;
+
+		current_region++;
+	}
+}
+
+void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data;
+	char guid[UUID_STRING_LEN + 1];
+	bool fast_dump;
+
+	fast_dump = ath10k_sdio_is_fast_dump_supported(ar);
+
+	if (fast_dump)
+		ath10k_bmi_start(ar);
+
+	ar->stats.fw_crash_counter++;
+
+	ath10k_sdio_disable_intrs(ar);
+
+	crash_data = ath10k_coredump_new(ar);
+
+	if (crash_data)
+		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+	else
+		scnprintf(guid, sizeof(guid), "n/a");
+
+	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+	ath10k_print_driver_info(ar);
+	ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
+	ath10k_sdio_dump_memory(ar, crash_data, fast_dump);
+
+	ath10k_sdio_enable_intrs(ar);
+
+	ath10k_core_start_recovery(ar);
+}
+
+static int ath10k_sdio_probe(struct sdio_func *func,
+			     const struct sdio_device_id *id)
+{
+	struct ath10k_sdio *ar_sdio;
+	struct ath10k *ar;
+	enum ath10k_hw_rev hw_rev;
+	u32 dev_id_base;
+	struct ath10k_bus_params bus_params = {};
+	int ret, i;
+
+	/* Assumption: all SDIO based chipsets (so far) are QCA6174 based.
+	 * If newer chipsets appear that do not use the hw reg setup as
+	 * defined in qca6174_regs and qca6174_values, this assumption is
+	 * no longer valid and hw_rev must be set up differently
+	 * depending on chipset.
+ */ + hw_rev = ATH10K_HW_QCA6174; + + ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO, + hw_rev, &ath10k_sdio_hif_ops); + if (!ar) { + dev_err(&func->dev, "failed to allocate core\n"); + return -ENOMEM; + } + + netif_napi_add(ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", + func->num, func->vendor, func->device, + func->max_blksize, func->cur_blksize); + + ar_sdio = ath10k_sdio_priv(ar); + + ar_sdio->irq_data.irq_proc_reg = + devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs), + GFP_KERNEL); + if (!ar_sdio->irq_data.irq_proc_reg) { + ret = -ENOMEM; + goto err_core_destroy; + } + + ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL); + if (!ar_sdio->vsg_buffer) { + ret = -ENOMEM; + goto err_core_destroy; + } + + ar_sdio->irq_data.irq_en_reg = + devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs), + GFP_KERNEL); + if (!ar_sdio->irq_data.irq_en_reg) { + ret = -ENOMEM; + goto err_core_destroy; + } + + ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL); + if (!ar_sdio->bmi_buf) { + ret = -ENOMEM; + goto err_core_destroy; + } + + ar_sdio->func = func; + sdio_set_drvdata(func, ar_sdio); + + ar_sdio->is_disabled = true; + ar_sdio->ar = ar; + + spin_lock_init(&ar_sdio->lock); + spin_lock_init(&ar_sdio->wr_async_lock); + mutex_init(&ar_sdio->irq_data.mtx); + + INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); + INIT_LIST_HEAD(&ar_sdio->wr_asyncq); + + INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work); + ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq"); + if (!ar_sdio->workqueue) { + ret = -ENOMEM; + goto err_core_destroy; + } + + for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++) + ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]); + + skb_queue_head_init(&ar_sdio->rx_head); + INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work); + + dev_id_base = (id->device & 0x0F00); + if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) && + dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) { + ret = -ENODEV; + ath10k_err(ar, "unsupported device id %u (0x%x)\n", + dev_id_base, id->device); + goto err_free_wq; + } + + ar->dev_id = QCA9377_1_0_DEVICE_ID; + ar->id.vendor = id->vendor; + ar->id.device = id->device; + + ath10k_sdio_set_mbox_info(ar); + + bus_params.dev_type = ATH10K_DEV_TYPE_HL; + /* TODO: don't know yet how to get chip_id with SDIO */ + bus_params.chip_id = 0; + bus_params.hl_msdu_ids = true; + + ar->hw->max_mtu = ETH_DATA_LEN; + + ret = ath10k_core_register(ar, &bus_params); + if (ret) { + ath10k_err(ar, "failed to register driver core: %d\n", ret); + goto err_free_wq; + } + + timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0); + + return 0; + +err_free_wq: + destroy_workqueue(ar_sdio->workqueue); +err_core_destroy: + ath10k_core_destroy(ar); + + return ret; +} + +static void ath10k_sdio_remove(struct sdio_func *func) +{ + struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func); + struct ath10k *ar = ar_sdio->ar; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "sdio removed func %d vendor 0x%x device 0x%x\n", + func->num, func->vendor, func->device); + + ath10k_core_unregister(ar); + + netif_napi_del(&ar->napi); + + destroy_workqueue(ar_sdio->workqueue); + + ath10k_core_destroy(ar); +} + +static const struct sdio_device_id ath10k_sdio_devices[] = { + {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)}, + 
{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)}, + {}, +}; + +MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices); + +static struct sdio_driver ath10k_sdio_driver = { + .name = "ath10k_sdio", + .id_table = ath10k_sdio_devices, + .probe = ath10k_sdio_probe, + .remove = ath10k_sdio_remove, + .drv = { + .pm = ATH10K_SDIO_PM_OPS, + }, +}; +module_sdio_driver(ath10k_sdio_driver); + +MODULE_AUTHOR("Qualcomm Atheros"); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h new file mode 100644 index 000000000000..b6ac927628b1 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/sdio.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com> + */ + +#ifndef _SDIO_H_ +#define _SDIO_H_ + +#define ATH10K_HIF_MBOX_BLOCK_SIZE 256 + +#define ATH10K_SDIO_MAX_BUFFER_SIZE 4096 /*Unsure of this constant*/ + +/* Mailbox address in SDIO address space */ +#define ATH10K_HIF_MBOX_BASE_ADDR 0x1000 +#define ATH10K_HIF_MBOX_WIDTH 0x800 + +#define ATH10K_HIF_MBOX_TOT_WIDTH \ + (ATH10K_HIF_MBOX_NUM_MAX * ATH10K_HIF_MBOX_WIDTH) + +#define ATH10K_HIF_MBOX0_EXT_BASE_ADDR 0x5000 +#define ATH10K_HIF_MBOX0_EXT_WIDTH (36 * 1024) +#define ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0 (56 * 1024) +#define ATH10K_HIF_MBOX1_EXT_WIDTH (36 * 1024) +#define ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE (2 * 1024) + +#define ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH \ + (ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr)) + +#define ATH10K_HIF_MBOX_NUM_MAX 4 +#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM 1024 + +#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ) + +/* HTC runs over mailbox 0 */ +#define ATH10K_HTC_MAILBOX 0 +#define ATH10K_HTC_MAILBOX_MASK BIT(ATH10K_HTC_MAILBOX) + +/* GMBOX addresses */ +#define ATH10K_HIF_GMBOX_BASE_ADDR 0x7000 +#define ATH10K_HIF_GMBOX_WIDTH 0x4000 + +/* Modified versions of the sdio.h macros. + * The macros in sdio.h can't be used easily with the FIELD_{PREP|GET} + * macros in bitfield.h, so we define our own macros here. + */ +#define ATH10K_SDIO_DRIVE_DTSX_MASK \ + (SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT) + +#define ATH10K_SDIO_DRIVE_DTSX_TYPE_B 0 +#define ATH10K_SDIO_DRIVE_DTSX_TYPE_A 1 +#define ATH10K_SDIO_DRIVE_DTSX_TYPE_C 2 +#define ATH10K_SDIO_DRIVE_DTSX_TYPE_D 3 + +/* SDIO CCCR register definitions */ +#define CCCR_SDIO_IRQ_MODE_REG 0xF0 +#define CCCR_SDIO_IRQ_MODE_REG_SDIO3 0x16 + +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR 0xF2 + +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A 0x02 +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C 0x04 +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D 0x08 + +#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS 0xF0 +#define CCCR_SDIO_ASYNC_INT_DELAY_MASK 0xC0 + +/* mode to enable special 4-bit interrupt assertion without clock */ +#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ BIT(0) +#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3 BIT(1) + +#define ATH10K_SDIO_TARGET_DEBUG_INTR_MASK 0x01 + +/* The theoretical maximum number of RX messages that can be fetched + * from the mbox interrupt handler in one loop is derived in the following + * way: + * + * Let's assume that each packet in a bundle of the maximum bundle size + * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set + * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE). 
+ *
+ * In this case the driver must allocate
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skb's.
+ */
+#define ATH10K_SDIO_MAX_RX_MSGS \
+	(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)
+
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL		0x00000868u
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF	0xFFFEFFFF
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON	0x10000
+
+enum sdio_mbox_state {
+	SDIO_MBOX_UNKNOWN_STATE = 0,
+	SDIO_MBOX_REQUEST_TO_SLEEP_STATE = 1,
+	SDIO_MBOX_SLEEP_STATE = 2,
+	SDIO_MBOX_AWAKE_STATE = 3,
+};
+
+#define ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US	125
+#define ATH10K_CIS_RTC_STATE_ADDR		0x1138
+#define ATH10K_CIS_RTC_STATE_ON			0x01
+#define ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US	1500
+#define ATH10K_CIS_READ_RETRY			10
+#define ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS	50
+
+/* TODO: remove this and use skb->cb instead, much cleaner approach */
+struct ath10k_sdio_bus_request {
+	struct list_head list;
+
+	/* sdio address */
+	u32 address;
+
+	struct sk_buff *skb;
+	enum ath10k_htc_ep_id eid;
+	int status;
+	/* Specifies if the current request is an HTC message.
+	 * If not, the eid is not applicable and the TX completion handler
+	 * associated with the endpoint will not be invoked.
+	 */
+	bool htc_msg;
+	/* Completion that (if set) will be invoked for non HTC requests
+	 * (htc_msg == false) when the request has been processed.
+	 */
+	struct completion *comp;
+};
+
+struct ath10k_sdio_rx_data {
+	struct sk_buff *skb;
+	size_t alloc_len;
+	size_t act_len;
+	enum ath10k_htc_ep_id eid;
+	bool part_of_bundle;
+	bool last_in_bundle;
+	bool trailer_only;
+};
+
+struct ath10k_sdio_irq_proc_regs {
+	u8 host_int_status;
+	u8 cpu_int_status;
+	u8 error_int_status;
+	u8 counter_int_status;
+	u8 mbox_frame;
+	u8 rx_lookahead_valid;
+	u8 host_int_status2;
+	u8 gmbox_rx_avail;
+	__le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
+	__le32 int_status_enable;
+};
+
+struct ath10k_sdio_irq_enable_regs {
+	u8 int_status_en;
+	u8 cpu_int_status_en;
+	u8 err_int_status_en;
+	u8 cntr_int_status_en;
+};
+
+struct ath10k_sdio_irq_data {
+	/* protects irq_proc_reg and irq_en_reg below.
+	 * We use a mutex here and not a spinlock since we will have the
+	 * mutex locked while calling the sdio_memcpy_ functions.
+	 * These functions require a non-atomic context, and hence,
+	 * spinlocks cannot be held while calling these functions.
+	 */
+	struct mutex mtx;
+	struct ath10k_sdio_irq_proc_regs *irq_proc_reg;
+	struct ath10k_sdio_irq_enable_regs *irq_en_reg;
+};
+
+struct ath10k_mbox_ext_info {
+	u32 htc_ext_addr;
+	u32 htc_ext_sz;
+};
+
+struct ath10k_mbox_info {
+	u32 htc_addr;
+	struct ath10k_mbox_ext_info ext_info[2];
+	u32 block_size;
+	u32 block_mask;
+	u32 gmbox_addr;
+	u32 gmbox_sz;
+};
+
+struct ath10k_sdio {
+	struct sdio_func *func;
+
+	struct ath10k_mbox_info mbox_info;
+	bool swap_mbox;
+	u32 mbox_addr[ATH10K_HTC_EP_COUNT];
+	u32 mbox_size[ATH10K_HTC_EP_COUNT];
+
+	/* available bus requests */
+	struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
+	/* free list of bus requests */
+	struct list_head bus_req_freeq;
+
+	struct sk_buff_head rx_head;
+
+	/* protects access to bus_req_freeq */
+	spinlock_t lock;
+
+	struct ath10k_sdio_rx_data rx_pkts[ATH10K_SDIO_MAX_RX_MSGS];
+	size_t n_rx_pkts;
+
+	struct ath10k *ar;
+	struct ath10k_sdio_irq_data irq_data;
+
+	/* temporary buffer for sdio read.
+	 * It is allocated at probe time and used for receiving bundled
+	 * packets; reads of bundled packets are not parallel, so it does
+	 * not need to be protected.
+ */ + u8 *vsg_buffer; + + /* temporary buffer for BMI requests */ + u8 *bmi_buf; + + bool is_disabled; + + struct workqueue_struct *workqueue; + struct work_struct wr_async_work; + struct list_head wr_asyncq; + /* protects access to wr_asyncq */ + spinlock_t wr_async_lock; + + struct work_struct async_work_rx; + struct timer_list sleep_timer; + enum sdio_mbox_state mbox_state; +}; + +static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar) +{ + return (struct ath10k_sdio *)ar->drv_priv; +} + +#endif diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c new file mode 100644 index 000000000000..b3f6424c17d3 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -0,0 +1,1895 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> +#include <linux/remoteproc/qcom_rproc.h> +#include <linux/of_reserved_mem.h> +#include <linux/iommu.h> + +#include "ce.h" +#include "coredump.h" +#include "debug.h" +#include "hif.h" +#include "htc.h" +#include "snoc.h" + +#define ATH10K_SNOC_RX_POST_RETRY_MS 50 +#define CE_POLL_PIPE 4 +#define ATH10K_SNOC_WAKE_IRQ 2 + +static char *const ce_name[] = { + "WLAN_CE_0", + "WLAN_CE_1", + "WLAN_CE_2", + "WLAN_CE_3", + "WLAN_CE_4", + "WLAN_CE_5", + "WLAN_CE_6", + "WLAN_CE_7", + "WLAN_CE_8", + "WLAN_CE_9", + "WLAN_CE_10", + "WLAN_CE_11", +}; + +static const char * const ath10k_regulators[] = { + "vdd-0.8-cx-mx", + "vdd-1.8-xo", + "vdd-1.3-rfa", + "vdd-3.3-ch0", + "vdd-3.3-ch1", +}; + +static const char * const ath10k_clocks[] = { + "cxo_ref_clk_pin", "qdss", +}; + +static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); + +static const struct ath10k_snoc_drv_priv drv_priv = { + .hw_rev = ATH10K_HW_WCN3990, + .dma_mask = DMA_BIT_MASK(35), + .msa_size = 0x100000, +}; + +#define WCN3990_SRC_WR_IDX_OFFSET 0x3C +#define WCN3990_DST_WR_IDX_OFFSET 0x40 + +static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = { + { + .ce_id = __cpu_to_le16(0), + .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(3), + .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(4), + .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(5), + .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(7), + .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(1), + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(2), + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(7), + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(8), + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(9), + .reg_offset = 
__cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(10), + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), + }, + + { + .ce_id = __cpu_to_le16(11), + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), + }, +}; + +static struct ce_attr host_ce_config_wlan[] = { + /* CE0: host->target HTC control streams */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 2048, + .dest_nentries = 0, + .send_cb = ath10k_snoc_htc_tx_cb, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, + + /* CE2: target->host WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 64, + .recv_cb = ath10k_snoc_htc_rx_cb, + }, + + /* CE3: host->target WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + .send_cb = ath10k_snoc_htc_tx_cb, + }, + + /* CE4: host->target HTT */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 2048, + .src_sz_max = 256, + .dest_nentries = 0, + .send_cb = ath10k_snoc_htt_tx_cb, + }, + + /* CE5: target->host HTT (ipa_uc->target ) */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 512, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_rx_cb, + }, + + /* CE6: target autonomous hif_memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE7: ce_diag, the Diagnostic Window */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 2, + .src_sz_max = 2048, + .dest_nentries = 2, + }, + + /* CE8: Target to uMC */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 128, + }, + + /* CE9 target->host HTT */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, + + /* CE10: target->host HTT */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, + + /* CE11: target -> host PKTLOG */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_pktlog_rx_cb, + }, +}; + +static struct ce_pipe_config target_ce_config_wlan[] = { + /* CE0: host->target HTC control and raw streams */ + { + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE1: target->host HTT + HTC control */ + { + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE2: target->host WMI */ + { + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(64), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE3: host->target WMI */ + { + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE4: host->target HTT */ + { + .pipenum = __cpu_to_le32(4), + .pipedir = 
__cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE5: target->host HTT (HIF->HTT) */ + { + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(1024), + .nbytes_max = __cpu_to_le32(64), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE6: Reserved for target autonomous hif_memcpy */ + { + .pipenum = __cpu_to_le32(6), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE7 used only by Host */ + { + .pipenum = __cpu_to_le32(7), + .pipedir = __cpu_to_le32(4), + .nentries = __cpu_to_le32(0), + .nbytes_max = __cpu_to_le32(0), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE8 Target to uMC */ + { + .pipenum = __cpu_to_le32(8), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(0), + .reserved = __cpu_to_le32(0), + }, + + /* CE9 target->host HTT */ + { + .pipenum = __cpu_to_le32(9), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE10 target->host HTT */ + { + .pipenum = __cpu_to_le32(10), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE11 target autonomous qcache memcpy */ + { + .pipenum = __cpu_to_le32(11), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, +}; + +static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = { + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + 
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(4), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), + __cpu_to_le32(5), + }, + { /* in = DL = target -> host */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(9), + }, + { /* in = DL = target -> host */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(10), + }, + { /* in = DL = target -> host pktlog */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(11), + }, + /* (Additions here) */ + + { /* must be last */ + __cpu_to_le32(0), + __cpu_to_le32(0), + __cpu_to_le32(0), + }, +}; + +static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + iowrite32(value, ar_snoc->mem + offset); +} + +static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + u32 val; + + val = ioread32(ar_snoc->mem + offset); + + return val; +} + +static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe) +{ + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct sk_buff *skb; + dma_addr_t paddr; + int ret; + + skb = dev_alloc_skb(pipe->buf_sz); + if (!skb) + return -ENOMEM; + + WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); + + paddr = dma_map_single(ar->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ar->dev, paddr))) { + ath10k_warn(ar, "failed to dma map snoc rx buf\n"); + dev_kfree_skb_any(skb); + return -EIO; + } + + ATH10K_SKB_RXCB(skb)->paddr = paddr; + + spin_lock_bh(&ce->ce_lock); + ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr); + spin_unlock_bh(&ce->ce_lock); + if (ret) { + dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + int ret, num; + + if (pipe->buf_sz == 0) + return; + + if (!ce_pipe->dest_ring) + return; + + spin_lock_bh(&ce->ce_lock); + num = __ath10k_ce_rx_num_free_bufs(ce_pipe); + spin_unlock_bh(&ce->ce_lock); + while (num--) { + ret = __ath10k_snoc_rx_post_buf(pipe); + if (ret) { + if (ret == -ENOSPC) + break; + ath10k_warn(ar, "failed to post rx buf: %d\n", ret); + 
mod_timer(&ar_snoc->rx_post_retry, jiffies + + ATH10K_SNOC_RX_POST_RETRY_MS); + break; + } + } +} + +static void ath10k_snoc_rx_post(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int i; + + for (i = 0; i < CE_COUNT; i++) + ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]); +} + +static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state, + void (*callback)(struct ath10k *ar, + struct sk_buff *skb)) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id]; + struct sk_buff *skb; + struct sk_buff_head list; + void *transfer_context; + unsigned int nbytes, max_nbytes; + + __skb_queue_head_init(&list); + while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, + &nbytes) == 0) { + skb = transfer_context; + max_nbytes = skb->len + skb_tailroom(skb); + dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + max_nbytes, DMA_FROM_DEVICE); + + if (unlikely(max_nbytes < nbytes)) { + ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n", + nbytes, max_nbytes); + dev_kfree_skb_any(skb); + continue; + } + + skb_put(skb, nbytes); + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n", + ce_state->id, skb->len); + + callback(ar, skb); + } + + ath10k_snoc_rx_post_pipe(pipe_info); +} + +static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + /* CE4 polling needs to be done whenever CE pipe which transports + * HTT Rx (target->host) is processed. + */ + ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE); + + ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +/* Called by lower (CE) layer when data is received from the Target. + * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data. 
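+ * Pktlog reuses the common rx completion path: the callback below just
+ * forwards everything to ath10k_htc_rx_completion_handler().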
+ */ +static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) +{ + skb_pull(skb, sizeof(struct ath10k_htc_hdr)); + ath10k_htt_t2h_msg_handler(ar, skb); +} + +static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE); + ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver); +} + +static void ath10k_snoc_rx_replenish_retry(struct timer_list *t) +{ + struct ath10k_snoc *ar_snoc = timer_container_of(ar_snoc, t, + rx_post_retry); + struct ath10k *ar = ar_snoc->ar; + + ath10k_snoc_rx_post(ar); +} + +static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state) +{ + struct ath10k *ar = ce_state->ar; + struct sk_buff_head list; + struct sk_buff *skb; + + __skb_queue_head_init(&list); + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { + if (!skb) + continue; + + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) + ath10k_htc_tx_completion_handler(ar, skb); +} + +static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state) +{ + struct ath10k *ar = ce_state->ar; + struct sk_buff *skb; + + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { + if (!skb) + continue; + + dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, + skb->len, DMA_TO_DEVICE); + ath10k_htt_hif_tx_complete(ar, skb); + } +} + +static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_snoc_pipe *snoc_pipe; + struct ath10k_ce_pipe *ce_pipe; + int err, i = 0; + + snoc_pipe = &ar_snoc->pipe_info[pipe_id]; + ce_pipe = snoc_pipe->ce_hdl; + spin_lock_bh(&ce->ce_lock); + + for (i = 0; i < n_items - 1; i++) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "snoc tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + CE_SEND_FLAG_GATHER); + if (err) + goto err; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "snoc tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + 0); + if (err) + goto err; + + spin_unlock_bh(&ce->ce_lock); + + return 0; + +err: + for (; i > 0; i--) + __ath10k_ce_send_revert(ce_pipe); + + spin_unlock_bh(&ce->ce_lock); + return err; +} + +static int ath10k_snoc_hif_get_target_info(struct ath10k *ar, + struct bmi_target_info *target_info) +{ + target_info->version = ATH10K_HW_WCN3990; + target_info->type = ATH10K_HW_WCN3990; + + return 0; +} + +static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n"); + + return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl); +} + +static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe, + int force) +{ + int resources; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n"); + + if (!force) { + resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe); + + if (resources > 
(host_ce_config_wlan[pipe].src_nentries >> 1)) + return; + } + ath10k_ce_per_engine_service(ar, pipe); +} + +static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar, + u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + const struct ce_service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n"); + + for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { + entry = &target_service_to_ce_map_wlan[i]; + + if (__le32_to_cpu(entry->service_id) != service_id) + continue; + + switch (__le32_to_cpu(entry->pipedir)) { + case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } + + if (!ul_set || !dl_set) + return -ENOENT; + + return 0; +} + +static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n"); + + (void)ath10k_snoc_hif_map_service_to_pipe(ar, + ATH10K_HTC_SVC_ID_RSVD_CTRL, + ul_pipe, dl_pipe); +} + +static inline void ath10k_snoc_irq_disable(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int id; + + for (id = 0; id < CE_COUNT_MAX; id++) + disable_irq(ar_snoc->ce_irqs[id].irq_line); +} + +static inline void ath10k_snoc_irq_enable(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int id; + + for (id = 0; id < CE_COUNT_MAX; id++) + enable_irq(ar_snoc->ce_irqs[id].irq_line); +} + +static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe) +{ + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct sk_buff *skb; + struct ath10k *ar; + int i; + + ar = snoc_pipe->hif_ce_state; + ce_pipe = snoc_pipe->ce_hdl; + ce_ring = ce_pipe->dest_ring; + + if (!ce_ring) + return; + + if (!snoc_pipe->buf_sz) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; + + dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } +} + +static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe) +{ + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct sk_buff *skb; + struct ath10k *ar; + int i; + + ar = snoc_pipe->hif_ce_state; + ce_pipe = snoc_pipe->ce_hdl; + ce_ring = ce_pipe->src_ring; + + if (!ce_ring) + return; + + if (!snoc_pipe->buf_sz) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; + + ath10k_htc_tx_completion_handler(ar, skb); + } +} + +static void ath10k_snoc_buffer_cleanup(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_pipe *pipe_info; + int pipe_num; + + timer_delete_sync(&ar_snoc->rx_post_retry); + for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { + pipe_info = &ar_snoc->pipe_info[pipe_num]; + ath10k_snoc_rx_pipe_cleanup(pipe_info); + ath10k_snoc_tx_pipe_cleanup(pipe_info); + } +} + +static void ath10k_snoc_hif_stop(struct ath10k *ar) +{ + if 
(!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + ath10k_snoc_irq_disable(ar); + + ath10k_core_napi_sync_disable(ar); + ath10k_snoc_buffer_cleanup(ar); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); +} + +static int ath10k_snoc_hif_start(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX); + + netif_threaded_enable(ar->napi_dev); + ath10k_core_napi_enable(ar); + /* IRQs are left enabled when we restart due to a firmware crash */ + if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags)) + ath10k_snoc_irq_enable(ar); + ath10k_snoc_rx_post(ar); + + clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); + + return 0; +} + +static int ath10k_snoc_init_pipes(struct ath10k *ar) +{ + int i, ret; + + for (i = 0; i < CE_COUNT; i++) { + ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); + if (ret) { + ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", + i, ret); + return ret; + } + } + + return 0; +} + +static int ath10k_snoc_wlan_enable(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) +{ + struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX]; + struct ath10k_qmi_wlan_enable_cfg cfg; + enum wlfw_driver_mode_enum_v01 mode; + int pipe_num; + + for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) { + tgt_cfg[pipe_num].pipe_num = + target_ce_config_wlan[pipe_num].pipenum; + tgt_cfg[pipe_num].pipe_dir = + target_ce_config_wlan[pipe_num].pipedir; + tgt_cfg[pipe_num].nentries = + target_ce_config_wlan[pipe_num].nentries; + tgt_cfg[pipe_num].nbytes_max = + target_ce_config_wlan[pipe_num].nbytes_max; + tgt_cfg[pipe_num].flags = + target_ce_config_wlan[pipe_num].flags; + tgt_cfg[pipe_num].reserved = 0; + } + + cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) / + sizeof(struct ath10k_tgt_pipe_cfg); + cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *) + &tgt_cfg; + cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) / + sizeof(struct ath10k_svc_pipe_cfg); + cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *) + &target_service_to_ce_map_wlan; + cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map); + cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *) + &target_shadow_reg_cfg_map; + + switch (fw_mode) { + case ATH10K_FIRMWARE_MODE_NORMAL: + mode = QMI_WLFW_MISSION_V01; + break; + case ATH10K_FIRMWARE_MODE_UTF: + mode = QMI_WLFW_FTM_V01; + break; + default: + ath10k_err(ar, "invalid firmware mode %d\n", fw_mode); + return -EINVAL; + } + + return ath10k_qmi_wlan_enable(ar, &cfg, mode, + NULL); +} + +static int ath10k_hw_power_on(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n"); + + ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks); + if (ret) + goto vreg_off; + + return ret; + +vreg_off: + regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs); + return ret; +} + +static int ath10k_hw_power_off(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n"); + + clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks); + + return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs); +} + +static void ath10k_snoc_wlan_disable(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + /* If both ATH10K_FLAG_CRASH_FLUSH and 
ATH10K_SNOC_FLAG_RECOVERY + * flags are not set, it means that the driver has restarted + * due to a crash inject via debugfs. In this case, the driver + * needs to restart the firmware and hence send qmi wlan disable, + * during the driver restart sequence. + */ + if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) || + !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags)) + ath10k_qmi_wlan_disable(ar); +} + +static void ath10k_snoc_hif_power_down(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); + + ath10k_snoc_wlan_disable(ar); + ath10k_ce_free_rri(ar); + ath10k_hw_power_off(ar); +} + +static int ath10k_snoc_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n", + __func__, ar->state); + + ret = ath10k_hw_power_on(ar); + if (ret) { + ath10k_err(ar, "failed to power on device: %d\n", ret); + return ret; + } + + ret = ath10k_snoc_wlan_enable(ar, fw_mode); + if (ret) { + ath10k_err(ar, "failed to enable wcn3990: %d\n", ret); + goto err_hw_power_off; + } + + ath10k_ce_alloc_rri(ar); + + ret = ath10k_snoc_init_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to initialize CE: %d\n", ret); + goto err_free_rri; + } + + ath10k_ce_enable_interrupts(ar); + + return 0; + +err_free_rri: + ath10k_ce_free_rri(ar); + ath10k_snoc_wlan_disable(ar); + +err_hw_power_off: + ath10k_hw_power_off(ar); + + return ret; +} + +static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar, + u8 fw_log_mode) +{ + u8 fw_dbg_mode; + + if (fw_log_mode) + fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE; + else + fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG; + + return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode); +} + +#ifdef CONFIG_PM +static int ath10k_snoc_hif_suspend(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ret; + + if (!device_may_wakeup(ar->dev)) + return -EPERM; + + ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line); + if (ret) { + ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n"); + + return ret; +} + +static int ath10k_snoc_hif_resume(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ret; + + if (!device_may_wakeup(ar->dev)) + return -EPERM; + + ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line); + if (ret) { + ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n"); + + return ret; +} +#endif + +static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { + .read32 = ath10k_snoc_read32, + .write32 = ath10k_snoc_write32, + .start = ath10k_snoc_hif_start, + .stop = ath10k_snoc_hif_stop, + .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe, + .get_default_pipe = ath10k_snoc_hif_get_default_pipe, + .power_up = ath10k_snoc_hif_power_up, + .power_down = ath10k_snoc_hif_power_down, + .tx_sg = ath10k_snoc_hif_tx_sg, + .send_complete_check = ath10k_snoc_hif_send_complete_check, + .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number, + .get_target_info = ath10k_snoc_hif_get_target_info, + .set_target_log_mode = ath10k_snoc_hif_set_target_log_mode, + +#ifdef CONFIG_PM + .suspend = ath10k_snoc_hif_suspend, + .resume = ath10k_snoc_hif_resume, +#endif +}; + +static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { + .read32 = ath10k_snoc_read32, + .write32 = ath10k_snoc_write32, +}; + +static int 
ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int i; + + for (i = 0; i < CE_COUNT_MAX; i++) { + if (ar_snoc->ce_irqs[i].irq_line == irq) + return i; + } + ath10k_err(ar, "No matching CE id for irq %d\n", irq); + + return -EINVAL; +} + +static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg) +{ + struct ath10k *ar = arg; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq); + + if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) { + ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, + ce_id); + return IRQ_HANDLED; + } + + ath10k_ce_disable_interrupt(ar, ce_id); + set_bit(ce_id, ar_snoc->pending_ce_irqs); + + napi_schedule(&ar->napi); + + return IRQ_HANDLED; +} + +static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget) +{ + struct ath10k *ar = container_of(ctx, struct ath10k, napi); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int done = 0; + int ce_id; + + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) { + napi_complete(ctx); + return done; + } + + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) + if (test_and_clear_bit(ce_id, ar_snoc->pending_ce_irqs)) { + ath10k_ce_per_engine_service(ar, ce_id); + ath10k_ce_enable_interrupt(ar, ce_id); + } + + done = ath10k_htt_txrx_compl_task(ar, budget); + + if (done < budget) + napi_complete(ctx); + + return done; +} + +static void ath10k_snoc_init_napi(struct ath10k *ar) +{ + netif_napi_add(ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll); +} + +static int ath10k_snoc_request_irq(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ret, id; + + for (id = 0; id < CE_COUNT_MAX; id++) { + ret = request_irq(ar_snoc->ce_irqs[id].irq_line, + ath10k_snoc_per_engine_handler, + IRQF_NO_AUTOEN, ce_name[id], ar); + if (ret) { + ath10k_err(ar, + "failed to register IRQ handler for CE %d: %d\n", + id, ret); + goto err_irq; + } + } + + return 0; + +err_irq: + for (id -= 1; id >= 0; id--) + free_irq(ar_snoc->ce_irqs[id].irq_line, ar); + + return ret; +} + +static void ath10k_snoc_free_irq(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int id; + + for (id = 0; id < CE_COUNT_MAX; id++) + free_irq(ar_snoc->ce_irqs[id].irq_line, ar); +} + +static int ath10k_snoc_resource_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct platform_device *pdev; + struct resource *res; + int i, ret = 0; + + pdev = ar_snoc->dev; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase"); + if (!res) { + ath10k_err(ar, "Memory base not found in DT\n"); + return -EINVAL; + } + + ar_snoc->mem_pa = res->start; + ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa, + resource_size(res)); + if (!ar_snoc->mem) { + ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n", + &ar_snoc->mem_pa); + return -EINVAL; + } + + for (i = 0; i < CE_COUNT; i++) { + ret = platform_get_irq(ar_snoc->dev, i); + if (ret < 0) + return ret; + ar_snoc->ce_irqs[i].irq_line = ret; + } + + ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data", + &ar_snoc->xo_cal_data); + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret); + if (ret == 0) { + ar_snoc->xo_cal_supported = true; + ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n", + ar_snoc->xo_cal_data); + } + + return 0; +} + +static void ath10k_snoc_quirks_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + 
struct device *dev = &ar_snoc->dev->dev; + + /* ignore errors, keep NULL if there is no property */ + of_property_read_string(dev->of_node, "firmware-name", &ar->board_name); + + if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk")) + set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags); +} + +int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_bus_params bus_params = {}; + int ret; + + if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags)) + return 0; + + switch (type) { + case ATH10K_QMI_EVENT_FW_READY_IND: + if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) { + ath10k_core_start_recovery(ar); + break; + } + + bus_params.dev_type = ATH10K_DEV_TYPE_LL; + bus_params.chip_id = ar_snoc->target_info.soc_version; + ret = ath10k_core_register(ar, &bus_params); + if (ret) { + ath10k_err(ar, "Failed to register driver core: %d\n", + ret); + return ret; + } + set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags); + break; + case ATH10K_QMI_EVENT_FW_DOWN_IND: + set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags); + set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); + break; + default: + ath10k_err(ar, "invalid fw indication: %llx\n", type); + return -EINVAL; + } + + return 0; +} + +static int ath10k_snoc_setup_resource(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_snoc_pipe *pipe; + int i, ret; + + timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0); + spin_lock_init(&ce->ce_lock); + for (i = 0; i < CE_COUNT; i++) { + pipe = &ar_snoc->pipe_info[i]; + pipe->ce_hdl = &ce->ce_states[i]; + pipe->pipe_num = i; + pipe->hif_ce_state = ar; + + ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); + if (ret) { + ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", + i, ret); + return ret; + } + + pipe->buf_sz = host_ce_config_wlan[i].src_sz_max; + } + ath10k_snoc_init_napi(ar); + + return 0; +} + +static void ath10k_snoc_release_resource(struct ath10k *ar) +{ + int i; + + netif_napi_del(&ar->napi); + for (i = 0; i < CE_COUNT; i++) + ath10k_ce_free_pipe(ar, i); +} + +static void ath10k_msa_dump_memory(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data) +{ + const struct ath10k_hw_mem_layout *mem_layout; + const struct ath10k_mem_region *current_region; + struct ath10k_dump_ram_data_hdr *hdr; + size_t buf_len; + u8 *buf; + + if (!crash_data || !crash_data->ramdump_buf) + return; + + mem_layout = ath10k_coredump_get_mem_layout(ar); + if (!mem_layout) + return; + + current_region = &mem_layout->region_table.regions[0]; + + buf = crash_data->ramdump_buf; + buf_len = crash_data->ramdump_buf_len; + memset(buf, 0, buf_len); + + /* Reserve space for the header. 
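The header records the region
+ * type, start address and length of the MSA payload copied after it.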
*/ + hdr = (void *)buf; + buf += sizeof(*hdr); + buf_len -= sizeof(*hdr); + + hdr->region_type = cpu_to_le32(current_region->type); + hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr); + hdr->length = cpu_to_le32(ar->msa.mem_size); + + if (current_region->len < ar->msa.mem_size) { + memcpy(buf, ar->msa.vaddr, current_region->len); + ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n", + current_region->len, ar->msa.mem_size); + } else { + memcpy(buf, ar->msa.vaddr, ar->msa.mem_size); + } +} + +void ath10k_snoc_fw_crashed_dump(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data; + char guid[UUID_STRING_LEN + 1]; + + mutex_lock(&ar->dump_mutex); + + spin_lock_bh(&ar->data_lock); + ar->stats.fw_crash_counter++; + spin_unlock_bh(&ar->data_lock); + + crash_data = ath10k_coredump_new(ar); + + if (crash_data) + scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); + else + scnprintf(guid, sizeof(guid), "n/a"); + + ath10k_err(ar, "firmware crashed! (guid %s)\n", guid); + ath10k_print_driver_info(ar); + ath10k_msa_dump_memory(ar, crash_data); + mutex_unlock(&ar->dump_mutex); +} + +static int ath10k_snoc_modem_notify(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, nb); + struct ath10k *ar = ar_snoc->ar; + struct qcom_ssr_notify_data *notify_data = data; + + switch (action) { + case QCOM_SSR_BEFORE_POWERUP: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem starting event\n"); + clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags); + break; + + case QCOM_SSR_AFTER_POWERUP: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem running event\n"); + break; + + case QCOM_SSR_BEFORE_SHUTDOWN: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem %s event\n", + notify_data->crashed ? 
"crashed" : "stopping"); + if (!notify_data->crashed) + set_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags); + else + clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags); + break; + + case QCOM_SSR_AFTER_SHUTDOWN: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem offline event\n"); + break; + + default: + ath10k_err(ar, "received unrecognized event %lu\n", action); + break; + } + + return NOTIFY_OK; +} + +static int ath10k_modem_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + void *notifier; + int ret; + + ar_snoc->nb.notifier_call = ath10k_snoc_modem_notify; + + notifier = qcom_register_ssr_notifier("mpss", &ar_snoc->nb); + if (IS_ERR(notifier)) { + ret = PTR_ERR(notifier); + ath10k_err(ar, "failed to initialize modem notifier: %d\n", ret); + return ret; + } + + ar_snoc->notifier = notifier; + + return 0; +} + +static void ath10k_modem_deinit(struct ath10k *ar) +{ + int ret; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ret = qcom_unregister_ssr_notifier(ar_snoc->notifier, &ar_snoc->nb); + if (ret) + ath10k_err(ar, "error %d unregistering notifier\n", ret); +} + +static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size) +{ + struct device *dev = ar->dev; + struct resource r; + int ret; + + ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r); + if (!ret) { + ar->msa.paddr = r.start; + ar->msa.mem_size = resource_size(&r); + ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr, + ar->msa.mem_size, + MEMREMAP_WT); + if (IS_ERR(ar->msa.vaddr)) { + dev_err(dev, "failed to map memory region: %pa\n", + &r.start); + return PTR_ERR(ar->msa.vaddr); + } + } else { + ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size, + &ar->msa.paddr, + GFP_KERNEL); + if (!ar->msa.vaddr) { + ath10k_err(ar, "failed to allocate dma memory for msa region\n"); + return -ENOMEM; + } + ar->msa.mem_size = msa_size; + } + + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad , msa.vaddr: 0x%p\n", + &ar->msa.paddr, + ar->msa.vaddr); + + return 0; +} + +static int ath10k_fw_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct device *host_dev = &ar_snoc->dev->dev; + struct platform_device_info info; + struct iommu_domain *iommu_dom; + struct platform_device *pdev; + struct device_node *node; + int ret; + + node = of_get_child_by_name(host_dev->of_node, "wifi-firmware"); + if (!node) { + ar_snoc->use_tz = true; + return 0; + } + + memset(&info, 0, sizeof(info)); + info.fwnode = &node->fwnode; + info.parent = host_dev; + info.name = node->name; + info.dma_mask = DMA_BIT_MASK(32); + + pdev = platform_device_register_full(&info); + if (IS_ERR(pdev)) { + of_node_put(node); + return PTR_ERR(pdev); + } + + pdev->dev.of_node = node; + + ret = of_dma_configure(&pdev->dev, node, true); + if (ret) { + ath10k_err(ar, "dma configure fail: %d\n", ret); + goto err_unregister; + } + + ar_snoc->fw.dev = &pdev->dev; + + iommu_dom = iommu_paging_domain_alloc(ar_snoc->fw.dev); + if (IS_ERR(iommu_dom)) { + ath10k_err(ar, "failed to allocate iommu domain\n"); + ret = PTR_ERR(iommu_dom); + goto err_unregister; + } + + ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev); + if (ret) { + ath10k_err(ar, "could not attach device: %d\n", ret); + goto err_iommu_free; + } + + ar_snoc->fw.iommu_domain = iommu_dom; + ar_snoc->fw.fw_start_addr = ar->msa.paddr; + + ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr, + ar->msa.paddr, ar->msa.mem_size, + IOMMU_READ | IOMMU_WRITE, GFP_KERNEL); + if (ret) { + ath10k_err(ar, "failed to map firmware 
region: %d\n", ret); + goto err_iommu_detach; + } + + of_node_put(node); + + return 0; + +err_iommu_detach: + iommu_detach_device(iommu_dom, ar_snoc->fw.dev); + +err_iommu_free: + iommu_domain_free(iommu_dom); + +err_unregister: + platform_device_unregister(pdev); + of_node_put(node); + + return ret; +} + +static int ath10k_fw_deinit(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + const size_t mapped_size = ar_snoc->fw.mapped_mem_size; + struct iommu_domain *iommu; + size_t unmapped_size; + + if (ar_snoc->use_tz) + return 0; + + iommu = ar_snoc->fw.iommu_domain; + + unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr, + mapped_size); + if (unmapped_size != mapped_size) + ath10k_err(ar, "failed to unmap firmware: %zu\n", + unmapped_size); + + iommu_detach_device(iommu, ar_snoc->fw.dev); + iommu_domain_free(iommu); + + platform_device_unregister(to_platform_device(ar_snoc->fw.dev)); + + return 0; +} + +static const struct of_device_id ath10k_snoc_dt_match[] = { + { .compatible = "qcom,wcn3990-wifi", + .data = &drv_priv, + }, + { } +}; +MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match); + +static int ath10k_snoc_probe(struct platform_device *pdev) +{ + const struct ath10k_snoc_drv_priv *drv_data; + struct ath10k_snoc *ar_snoc; + struct device *dev; + struct ath10k *ar; + u32 msa_size; + int ret; + u32 i; + + dev = &pdev->dev; + drv_data = device_get_match_data(dev); + if (!drv_data) { + dev_err(dev, "failed to find matching device tree id\n"); + return -EINVAL; + } + + ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask); + if (ret) { + dev_err(dev, "failed to set dma mask: %d\n", ret); + return ret; + } + + ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC, + drv_data->hw_rev, &ath10k_snoc_hif_ops); + if (!ar) { + dev_err(dev, "failed to allocate core\n"); + return -ENOMEM; + } + + ar_snoc = ath10k_snoc_priv(ar); + ar_snoc->dev = pdev; + platform_set_drvdata(pdev, ar); + ar_snoc->ar = ar; + ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops; + ar->ce_priv = &ar_snoc->ce; + msa_size = drv_data->msa_size; + + ath10k_snoc_quirks_init(ar); + + ret = ath10k_snoc_resource_init(ar); + if (ret) { + ath10k_warn(ar, "failed to initialize resource: %d\n", ret); + goto err_core_destroy; + } + + ret = ath10k_snoc_setup_resource(ar); + if (ret) { + ath10k_warn(ar, "failed to setup resource: %d\n", ret); + goto err_core_destroy; + } + ret = ath10k_snoc_request_irq(ar); + if (ret) { + ath10k_warn(ar, "failed to request irqs: %d\n", ret); + goto err_release_resource; + } + + ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators); + ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs, + sizeof(*ar_snoc->vregs), GFP_KERNEL); + if (!ar_snoc->vregs) { + ret = -ENOMEM; + goto err_free_irq; + } + for (i = 0; i < ar_snoc->num_vregs; i++) + ar_snoc->vregs[i].supply = ath10k_regulators[i]; + + ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs, + ar_snoc->vregs); + if (ret < 0) + goto err_free_irq; + + ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks); + ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks, + sizeof(*ar_snoc->clks), GFP_KERNEL); + if (!ar_snoc->clks) { + ret = -ENOMEM; + goto err_free_irq; + } + + for (i = 0; i < ar_snoc->num_clks; i++) + ar_snoc->clks[i].id = ath10k_clocks[i]; + + ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks, + ar_snoc->clks); + if (ret) + goto err_free_irq; + + ret = ath10k_setup_msa_resources(ar, msa_size); + if (ret) { + ath10k_warn(ar, "failed to setup msa resources: %d\n", ret); + goto err_free_irq; 
+ } + + ret = ath10k_fw_init(ar); + if (ret) { + ath10k_err(ar, "failed to initialize firmware: %d\n", ret); + goto err_free_irq; + } + + ret = ath10k_qmi_init(ar, msa_size); + if (ret) { + ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret); + goto err_fw_deinit; + } + + ret = ath10k_modem_init(ar); + if (ret) + goto err_qmi_deinit; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n"); + + return 0; + +err_qmi_deinit: + ath10k_qmi_deinit(ar); + +err_fw_deinit: + ath10k_fw_deinit(ar); + +err_free_irq: + ath10k_snoc_free_irq(ar); + +err_release_resource: + ath10k_snoc_release_resource(ar); + +err_core_destroy: + ath10k_core_destroy(ar); + + return ret; +} + +static int ath10k_snoc_free_resources(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc free resources\n"); + + set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags); + + ath10k_core_unregister(ar); + ath10k_fw_deinit(ar); + ath10k_snoc_free_irq(ar); + ath10k_snoc_release_resource(ar); + ath10k_modem_deinit(ar); + ath10k_qmi_deinit(ar); + ath10k_core_destroy(ar); + + return 0; +} + +static void ath10k_snoc_remove(struct platform_device *pdev) +{ + struct ath10k *ar = platform_get_drvdata(pdev); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); + + reinit_completion(&ar->driver_recovery); + + if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags)) + wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ); + + ath10k_snoc_free_resources(ar); +} + +static void ath10k_snoc_shutdown(struct platform_device *pdev) +{ + struct ath10k *ar = platform_get_drvdata(pdev); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n"); + ath10k_snoc_free_resources(ar); +} + +static struct platform_driver ath10k_snoc_driver = { + .probe = ath10k_snoc_probe, + .remove = ath10k_snoc_remove, + .shutdown = ath10k_snoc_shutdown, + .driver = { + .name = "ath10k_snoc", + .of_match_table = ath10k_snoc_dt_match, + }, +}; +module_platform_driver(ath10k_snoc_driver); + +MODULE_AUTHOR("Qualcomm"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices"); diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h new file mode 100644 index 000000000000..d4bce1707696 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _SNOC_H_
+#define _SNOC_H_
+
+#include <linux/notifier.h>
+
+#include "hw.h"
+#include "ce.h"
+#include "qmi.h"
+
+struct ath10k_snoc_drv_priv {
+ enum ath10k_hw_rev hw_rev;
+ u64 dma_mask;
+ u32 msa_size;
+};
+
+struct snoc_state {
+ u32 pipe_cfg_addr;
+ u32 svc_to_pipe_map;
+};
+
+struct ath10k_snoc_pipe {
+ struct ath10k_ce_pipe *ce_hdl;
+ u8 pipe_num;
+ struct ath10k *hif_ce_state;
+ size_t buf_sz;
+ /* protect ce info */
+ spinlock_t pipe_lock;
+ struct ath10k_snoc *ar_snoc;
+};
+
+struct ath10k_snoc_target_info {
+ u32 target_version;
+ u32 target_type;
+ u32 target_revision;
+ u32 soc_version;
+};
+
+struct ath10k_snoc_ce_irq {
+ u32 irq_line;
+};
+
+enum ath10k_snoc_flags {
+ ATH10K_SNOC_FLAG_REGISTERED,
+ ATH10K_SNOC_FLAG_UNREGISTERING,
+ ATH10K_SNOC_FLAG_MODEM_STOPPED,
+ ATH10K_SNOC_FLAG_RECOVERY,
+ ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK,
+};
+
+struct clk_bulk_data;
+struct regulator_bulk_data;
+
+struct ath10k_snoc {
+ struct platform_device *dev;
+ struct ath10k *ar;
+ unsigned int use_tz;
+ struct ath10k_firmware {
+ struct device *dev;
+ dma_addr_t fw_start_addr;
+ struct iommu_domain *iommu_domain;
+ size_t mapped_mem_size;
+ } fw;
+ void __iomem *mem;
+ dma_addr_t mem_pa;
+ struct ath10k_snoc_target_info target_info;
+ size_t mem_len;
+ struct ath10k_snoc_pipe pipe_info[CE_COUNT_MAX];
+ struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
+ struct ath10k_ce ce;
+ struct timer_list rx_post_retry;
+ struct regulator_bulk_data *vregs;
+ size_t num_vregs;
+ struct clk_bulk_data *clks;
+ size_t num_clks;
+ struct ath10k_qmi *qmi;
+ struct notifier_block nb;
+ void *notifier;
+ unsigned long flags;
+ bool xo_cal_supported;
+ u32 xo_cal_data;
+ DECLARE_BITMAP(pending_ce_irqs, CE_COUNT_MAX);
+};
+
+static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
+{
+ return (struct ath10k_snoc *)ar->drv_priv;
+}
+
+int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
+void ath10k_snoc_fw_crashed_dump(struct ath10k *ar);
+
+#endif /* _SNOC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
new file mode 100644
index 000000000000..2240994390ed
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2013-2017 Qualcomm Atheros, Inc.
+ */
+
+#include <linux/relay.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi-ops.h"
+
+static void send_fft_sample(struct ath10k *ar,
+ const struct fft_sample_tlv *fft_sample_tlv)
+{
+ int length;
+
+ if (!ar->spectral.rfs_chan_spec_scan)
+ return;
+
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length);
+}
+
+static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
+ u8 *data)
+{
+ int dc_pos;
+ u8 max_exp;
+
+ dc_pos = bin_len / 2;
+
+ /* peak index outside of bins */
+ if (dc_pos < max_index || -dc_pos >= max_index)
+ return 0;
+
+ for (max_exp = 0; max_exp < 8; max_exp++) {
+ if (data[dc_pos + max_index] == (max_magnitude >> max_exp))
+ break;
+ }
+
+ /* max_exp not found */
+ if (data[dc_pos + max_index] != (max_magnitude >> max_exp))
+ return 0;
+
+ return max_exp;
+}
+
+static inline size_t ath10k_spectral_fix_bin_size(struct ath10k *ar,
+ size_t bin_len)
+{
+ /* some chipsets report bin size as 2^n bytes + 'm' bytes in
+ * report mode 2. The first 2^n bytes carry inband tones and the
+ * last 'm' bytes carry band edge detection data mainly used for
+ * radar detection. Strip the last 'm' bytes to make the bin size
+ * valid. 'm' can take the possible values 4 and 12.
+ */
+ if (!is_power_of_2(bin_len))
+ bin_len -= ar->hw_params.spectral_bin_discard;
+
+ return bin_len;
+}
+
+int ath10k_spectral_process_fft(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_fft_report *fftr,
+ size_t bin_len, u64 tsf)
+{
+ struct fft_sample_ath10k *fft_sample;
+ u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
+ u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
+ u32 reg0, reg1;
+ u8 chain_idx, *bins;
+ int dc_pos;
+
+ fft_sample = (struct fft_sample_ath10k *)&buf;
+
+ bin_len = ath10k_spectral_fix_bin_size(ar, bin_len);
+
+ if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
+ return -EINVAL;
+
+ reg0 = __le32_to_cpu(fftr->reg0);
+ reg1 = __le32_to_cpu(fftr->reg1);
+
+ length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len;
+ fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K;
+ fft_sample->tlv.length = __cpu_to_be16(length);
+
+ /* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
+ * but the results/plots suggest that it's actually 22/44/88 MHz.
+ */
+ switch (phyerr->chan_width_mhz) {
+ case 20:
+ fft_sample->chan_width_mhz = 22;
+ break;
+ case 40:
+ fft_sample->chan_width_mhz = 44;
+ break;
+ case 80:
+ /* TODO: As experiments with an analogue sender and various
+ * configurations (fft-sizes of 64/128/256 and 20/40/80 MHz)
+ * show, the particular configuration of 80 MHz/64 bins does
+ * not match with the other samples at all. Until the reason
+ * for that is found, don't report these samples.
+ */
+ if (bin_len == 64)
+ return -EINVAL;
+ fft_sample->chan_width_mhz = 88;
+ break;
+ default:
+ fft_sample->chan_width_mhz = phyerr->chan_width_mhz;
+ }
+
+ fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
+ fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB);
+
+ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+ fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
+ fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
+ fft_sample->rssi = phyerr->rssi_combined;
+
+ total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
+ base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
+ fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
+ fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
+
+ freq1 = phyerr->freq1;
+ freq2 = phyerr->freq2;
+ fft_sample->freq1 = __cpu_to_be16(freq1);
+ fft_sample->freq2 = __cpu_to_be16(freq2);
+
+ chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
+
+ fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]);
+
+ bins = (u8 *)fftr;
+ bins += sizeof(*fftr) + ar->hw_params.spectral_bin_offset;
+
+ fft_sample->tsf = __cpu_to_be64(tsf);
+
+ /* max_exp has been directly reported by previous hardware (ath9k),
+ * maybe it's possible to get it by other means?
+ */
+ fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag,
+ bin_len, bins);
+
+ memcpy(fft_sample->data, bins, bin_len);
+
+ /* DC value (value in the middle) is the blind spot of the spectral
+ * sample and invalid, interpolate it.
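+ * (here: a simple linear average of the two neighbouring bins)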
+ */ + dc_pos = bin_len / 2; + fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] + + fft_sample->data[dc_pos - 1]) / 2; + + send_fft_sample(ar, &fft_sample->tlv); + + return 0; +} + +static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + + lockdep_assert_held(&ar->conf_mutex); + + if (list_empty(&ar->arvifs)) + return NULL; + + /* if there already is a vif doing spectral, return that. */ + list_for_each_entry(arvif, &ar->arvifs, list) + if (arvif->spectral_enabled) + return arvif; + + /* otherwise, return the first vif. */ + return list_first_entry(&ar->arvifs, typeof(*arvif), list); +} + +static int ath10k_spectral_scan_trigger(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int res; + int vdev_id; + + lockdep_assert_held(&ar->conf_mutex); + + arvif = ath10k_get_spectral_vdev(ar); + if (!arvif) + return -ENODEV; + vdev_id = arvif->vdev_id; + + if (ar->spectral.mode == SPECTRAL_DISABLED) + return 0; + + res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, + WMI_SPECTRAL_TRIGGER_CMD_CLEAR, + WMI_SPECTRAL_ENABLE_CMD_ENABLE); + if (res < 0) + return res; + + res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, + WMI_SPECTRAL_TRIGGER_CMD_TRIGGER, + WMI_SPECTRAL_ENABLE_CMD_ENABLE); + if (res < 0) + return res; + + return 0; +} + +static int ath10k_spectral_scan_config(struct ath10k *ar, + enum ath10k_spectral_mode mode) +{ + struct wmi_vdev_spectral_conf_arg arg; + struct ath10k_vif *arvif; + int vdev_id, count, res = 0; + + lockdep_assert_held(&ar->conf_mutex); + + arvif = ath10k_get_spectral_vdev(ar); + if (!arvif) + return -ENODEV; + + vdev_id = arvif->vdev_id; + + arvif->spectral_enabled = (mode != SPECTRAL_DISABLED); + ar->spectral.mode = mode; + + res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, + WMI_SPECTRAL_TRIGGER_CMD_CLEAR, + WMI_SPECTRAL_ENABLE_CMD_DISABLE); + if (res < 0) { + ath10k_warn(ar, "failed to enable spectral scan: %d\n", res); + return res; + } + + if (mode == SPECTRAL_DISABLED) + return 0; + + if (mode == SPECTRAL_BACKGROUND) + count = WMI_SPECTRAL_COUNT_DEFAULT; + else + count = max_t(u8, 1, ar->spectral.config.count); + + arg.vdev_id = vdev_id; + arg.scan_count = count; + arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT; + arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT; + arg.scan_fft_size = ar->spectral.config.fft_size; + arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT; + arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT; + arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT; + arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT; + arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT; + arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT; + arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT; + arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT; + arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT; + arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT; + arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT; + arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT; + arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT; + arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT; + + res = ath10k_wmi_vdev_spectral_conf(ar, &arg); + if (res < 0) { + ath10k_warn(ar, "failed to configure spectral scan: %d\n", res); + return res; + } + + return 0; +} + +static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char *mode = ""; + size_t len; + enum ath10k_spectral_mode spectral_mode; + + 
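/* snapshot the mode under conf_mutex; the mode strings below are
+ * static literals, so only this read needs the lock
+ */
+ 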
mutex_lock(&ar->conf_mutex); + spectral_mode = ar->spectral.mode; + mutex_unlock(&ar->conf_mutex); + + switch (spectral_mode) { + case SPECTRAL_DISABLED: + mode = "disable"; + break; + case SPECTRAL_BACKGROUND: + mode = "background"; + break; + case SPECTRAL_MANUAL: + mode = "manual"; + break; + } + + len = strlen(mode); + return simple_read_from_buffer(user_buf, count, ppos, mode, len); +} + +static ssize_t write_file_spec_scan_ctl(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + ssize_t len; + int res; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + mutex_lock(&ar->conf_mutex); + + if (strncmp("trigger", buf, 7) == 0) { + if (ar->spectral.mode == SPECTRAL_MANUAL || + ar->spectral.mode == SPECTRAL_BACKGROUND) { + /* reset the configuration to adopt possibly changed + * debugfs parameters + */ + res = ath10k_spectral_scan_config(ar, + ar->spectral.mode); + if (res < 0) { + ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n", + res); + } + res = ath10k_spectral_scan_trigger(ar); + if (res < 0) { + ath10k_warn(ar, "failed to trigger spectral scan: %d\n", + res); + } + } else { + res = -EINVAL; + } + } else if (strncmp("background", buf, 10) == 0) { + res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND); + } else if (strncmp("manual", buf, 6) == 0) { + res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL); + } else if (strncmp("disable", buf, 7) == 0) { + res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED); + } else { + res = -EINVAL; + } + + mutex_unlock(&ar->conf_mutex); + + if (res < 0) + return res; + + return count; +} + +static const struct file_operations fops_spec_scan_ctl = { + .read = read_file_spec_scan_ctl, + .write = write_file_spec_scan_ctl, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_spectral_count(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + size_t len; + u8 spectral_count; + + mutex_lock(&ar->conf_mutex); + spectral_count = ar->spectral.config.count; + mutex_unlock(&ar->conf_mutex); + + len = sprintf(buf, "%d\n", spectral_count); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_spectral_count(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned long val; + ssize_t ret; + + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; + + if (val > 255) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + ar->spectral.config.count = val; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_spectral_count = { + .read = read_file_spectral_count, + .write = write_file_spectral_count, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_spectral_bins(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + unsigned int bins, fft_size, bin_scale; + size_t len; + + mutex_lock(&ar->conf_mutex); + + fft_size = ar->spectral.config.fft_size; + bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT; + bins = 1 << (fft_size - bin_scale); + + mutex_unlock(&ar->conf_mutex); + + len = sprintf(buf, "%d\n", bins); + return 
simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_spectral_bins(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned long val; + ssize_t ret; + + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; + + if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS) + return -EINVAL; + + if (!is_power_of_2(val)) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + ar->spectral.config.fft_size = ilog2(val); + ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_spectral_bins = { + .read = read_file_spectral_bins, + .write = write_file_spectral_bins, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static struct dentry *create_buf_file_handler(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global) +{ + struct dentry *buf_file; + + buf_file = debugfs_create_file(filename, mode, parent, buf, + &relay_file_operations); + if (IS_ERR(buf_file)) + return NULL; + + *is_global = 1; + return buf_file; +} + +static int remove_buf_file_handler(struct dentry *dentry) +{ + debugfs_remove(dentry); + + return 0; +} + +static const struct rchan_callbacks rfs_spec_scan_cb = { + .create_buf_file = create_buf_file_handler, + .remove_buf_file = remove_buf_file_handler, +}; + +int ath10k_spectral_start(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) + arvif->spectral_enabled = 0; + + ar->spectral.mode = SPECTRAL_DISABLED; + ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT; + ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT; + + return 0; +} + +int ath10k_spectral_vif_stop(struct ath10k_vif *arvif) +{ + if (!arvif->spectral_enabled) + return 0; + + return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED); +} + +int ath10k_spectral_create(struct ath10k *ar) +{ + /* The buffer size covers whole channels in dual bands up to 128 bins. + * Scan with bigger than 128 bins needs to be run on single band each. + */ + ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan", + ar->debug.debugfs_phy, + 1140, 2500, + &rfs_spec_scan_cb, NULL); + debugfs_create_file("spectral_scan_ctl", + 0600, + ar->debug.debugfs_phy, ar, + &fops_spec_scan_ctl); + debugfs_create_file("spectral_count", + 0600, + ar->debug.debugfs_phy, ar, + &fops_spectral_count); + debugfs_create_file("spectral_bins", + 0600, + ar->debug.debugfs_phy, ar, + &fops_spectral_bins); + + return 0; +} + +void ath10k_spectral_destroy(struct ath10k *ar) +{ + if (ar->spectral.rfs_chan_spec_scan) { + relay_close(ar->spectral.rfs_chan_spec_scan); + ar->spectral.rfs_chan_spec_scan = NULL; + } +} diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h new file mode 100644 index 000000000000..5f481f11c6e5 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/spectral.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2013-2015 Qualcomm Atheros, Inc. 
+ */ + +#ifndef SPECTRAL_H +#define SPECTRAL_H + +#include "../spectral_common.h" + +/** + * struct ath10k_spec_scan - parameters for Atheros spectral scan + * + * @count: number of scan results requested for manual mode + * @fft_size: number of bins to be requested = 2^(fft_size - bin_scale) + */ +struct ath10k_spec_scan { + u8 count; + u8 fft_size; +}; + +/* enum ath10k_spectral_mode: + * + * @SPECTRAL_DISABLED: spectral mode is disabled + * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with + * something else. + * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples + * is performed manually. + */ +enum ath10k_spectral_mode { + SPECTRAL_DISABLED = 0, + SPECTRAL_BACKGROUND, + SPECTRAL_MANUAL, +}; + +#ifdef CONFIG_ATH10K_SPECTRAL + +int ath10k_spectral_process_fft(struct ath10k *ar, + struct wmi_phyerr_ev_arg *phyerr, + const struct phyerr_fft_report *fftr, + size_t bin_len, u64 tsf); +int ath10k_spectral_start(struct ath10k *ar); +int ath10k_spectral_vif_stop(struct ath10k_vif *arvif); +int ath10k_spectral_create(struct ath10k *ar); +void ath10k_spectral_destroy(struct ath10k *ar); + +#else + +static inline int +ath10k_spectral_process_fft(struct ath10k *ar, + struct wmi_phyerr_ev_arg *phyerr, + const struct phyerr_fft_report *fftr, + size_t bin_len, u64 tsf) +{ + return 0; +} + +static inline int ath10k_spectral_start(struct ath10k *ar) +{ + return 0; +} + +static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif) +{ + return 0; +} + +static inline int ath10k_spectral_create(struct ath10k *ar) +{ + return 0; +} + +static inline void ath10k_spectral_destroy(struct ath10k *ar) +{ +} + +#endif /* CONFIG_ATH10K_SPECTRAL */ + +#endif /* SPECTRAL_H */ diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c new file mode 100644 index 000000000000..7198a386f2fb --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/swap.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2015-2016 Qualcomm Atheros, Inc. + */ + +/* This file has implementation for code swap logic. With code swap feature, + * target can run the fw binary with even smaller IRAM size by using host + * memory to store some of the code segments. + */ + +#include "core.h" +#include "bmi.h" +#include "debug.h" + +static int ath10k_swap_code_seg_fill(struct ath10k *ar, + struct ath10k_swap_code_seg_info *seg_info, + const void *data, size_t data_len) +{ + u8 *virt_addr = seg_info->virt_address[0]; + u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {}; + const u8 *fw_data = data; + union ath10k_swap_code_seg_item *swap_item; + u32 length = 0; + u32 payload_len; + u32 total_payload_len = 0; + u32 size_left = data_len; + + /* Parse swap bin and copy the content to host allocated memory. + * The format is Address, length and value. The last 4-bytes is + * target write address. Currently address field is not used. 
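+ * The tail's bmi_write_addr is captured below in target_addr and later
+ * used by ath10k_swap_code_seg_configure() for the BMI write.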
+ */
+ seg_info->target_addr = -1;
+ while (size_left >= sizeof(*swap_item)) {
+ swap_item = (union ath10k_swap_code_seg_item *)fw_data;
+ payload_len = __le32_to_cpu(swap_item->tlv.length);
+ if ((payload_len > size_left) ||
+ (payload_len == 0 &&
+ size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
+ ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
+ payload_len);
+ return -EINVAL;
+ }
+
+ if (payload_len == 0) {
+ if (memcmp(swap_item->tail.magic_signature, swap_magic,
+ ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
+ ath10k_err(ar, "refusing an invalid swap file\n");
+ return -EINVAL;
+ }
+ seg_info->target_addr =
+ __le32_to_cpu(swap_item->tail.bmi_write_addr);
+ break;
+ }
+
+ memcpy(virt_addr, swap_item->tlv.data, payload_len);
+ virt_addr += payload_len;
+ length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
+ size_left -= length;
+ fw_data += length;
+ total_payload_len += payload_len;
+ }
+
+ if (seg_info->target_addr == -1) {
+ ath10k_err(ar, "failed to parse invalid swap file\n");
+ return -EINVAL;
+ }
+ seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
+
+ return 0;
+}
+
+static void
+ath10k_swap_code_seg_free(struct ath10k *ar,
+ struct ath10k_swap_code_seg_info *seg_info)
+{
+ u32 seg_size;
+
+ if (!seg_info)
+ return;
+
+ if (!seg_info->virt_address[0])
+ return;
+
+ seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
+ dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
+ seg_info->paddr[0]);
+}
+
+static struct ath10k_swap_code_seg_info *
+ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
+{
+ struct ath10k_swap_code_seg_info *seg_info;
+ void *virt_addr;
+ dma_addr_t paddr;
+
+ swap_bin_len = roundup(swap_bin_len, 2);
+ if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
+ ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
+ swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
+ return NULL;
+ }
+
+ seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
+ if (!seg_info)
+ return NULL;
+
+ virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
+ GFP_KERNEL);
+ if (!virt_addr)
+ return NULL;
+
+ seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
+ seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
+ seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
+ seg_info->seg_hw_info.num_segs =
+ __cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
+ seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
+ seg_info->virt_address[0] = virt_addr;
+ seg_info->paddr[0] = paddr;
+
+ return seg_info;
+}
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ const struct ath10k_fw_file *fw_file)
+{
+ int ret;
+ struct ath10k_swap_code_seg_info *seg_info = NULL;
+
+ if (!fw_file->firmware_swap_code_seg_info)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+
+ seg_info = fw_file->firmware_swap_code_seg_info;
+
+ ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
+ &seg_info->seg_hw_info,
+ sizeof(seg_info->seg_hw_info));
+ if (ret) {
+ ath10k_err(ar, "failed to write Code swap segment information (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
+{
+ ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);
+
+ /* FIXME: these two assignments look to be in a wrong place! Shouldn't
+ * they be in ath10k_core_free_firmware_files() like the rest?
+ */ + fw_file->codeswap_data = NULL; + fw_file->codeswap_len = 0; + + fw_file->firmware_swap_code_seg_info = NULL; +} + +int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file) +{ + int ret; + struct ath10k_swap_code_seg_info *seg_info; + const void *codeswap_data; + size_t codeswap_len; + + codeswap_data = fw_file->codeswap_data; + codeswap_len = fw_file->codeswap_len; + + if (!codeswap_len || !codeswap_data) + return 0; + + seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len); + if (!seg_info) { + ath10k_err(ar, "failed to allocate fw code swap segment\n"); + return -ENOMEM; + } + + ret = ath10k_swap_code_seg_fill(ar, seg_info, + codeswap_data, codeswap_len); + + if (ret) { + ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n", + ret); + ath10k_swap_code_seg_free(ar, seg_info); + return ret; + } + + fw_file->firmware_swap_code_seg_info = seg_info; + + return 0; +} diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h new file mode 100644 index 000000000000..b4733b5ded34 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/swap.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2015-2016 Qualcomm Atheros, Inc. + */ + +#ifndef _SWAP_H_ +#define _SWAP_H_ + +#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX (512 * 1024) +#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ 12 +#define ATH10K_SWAP_CODE_SEG_NUM_MAX 16 +/* Currently only one swap segment is supported */ +#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1 + +struct ath10k_fw_file; + +struct ath10k_swap_code_seg_tlv { + __le32 address; + __le32 length; + u8 data[]; +} __packed; + +struct ath10k_swap_code_seg_tail { + u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ]; + __le32 bmi_write_addr; +} __packed; + +union ath10k_swap_code_seg_item { + struct ath10k_swap_code_seg_tlv tlv; + struct ath10k_swap_code_seg_tail tail; +} __packed; + +struct ath10k_swap_code_seg_hw_info { + /* Swap binary image size */ + __le32 swap_size; + __le32 num_segs; + + /* Swap data size */ + __le32 size; + __le32 size_log2; + __le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX]; + __le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX]; +} __packed; + +struct ath10k_swap_code_seg_info { + struct ath10k_swap_code_seg_hw_info seg_hw_info; + void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED]; + u32 target_addr; + dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED]; +}; + +int ath10k_swap_code_seg_configure(struct ath10k *ar, + const struct ath10k_fw_file *fw_file); +void ath10k_swap_code_seg_release(struct ath10k *ar, + struct ath10k_fw_file *fw_file); +int ath10k_swap_code_seg_init(struct ath10k *ar, + struct ath10k_fw_file *fw_file); + +#endif diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index be7ba1e78afe..ba37e6c7ced0 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -1,23 +1,14 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. */ #ifndef __TARGADDRS_H__ #define __TARGADDRS_H__ +#include "hw.h" + /* * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the * host_interest structure. It must match the address of the _host_interest @@ -203,6 +194,24 @@ struct host_interest { */ /* Bit 1 - unused */ u32 hi_fw_swap; /* 0x104 */ + + /* global arenas pointer address, used by host driver debug */ + u32 hi_dynamic_mem_arenas_addr; /* 0x108 */ + + /* allocated bytes of DRAM use by allocated */ + u32 hi_dynamic_mem_allocated; /* 0x10C */ + + /* remaining bytes of DRAM */ + u32 hi_dynamic_mem_remaining; /* 0x110 */ + + /* memory track count, configured by host */ + u32 hi_dynamic_mem_track_max; /* 0x114 */ + + /* minidump buffer */ + u32 hi_minidump; /* 0x118 */ + + /* bdata's sig and key addr */ + u32 hi_bd_sig_key; /* 0x11c */ } __packed; #define HI_ITEM(item) offsetof(struct host_interest, item) @@ -266,13 +275,13 @@ struct host_interest { #define HI_OPTION_FW_BRIDGE_SHIFT 0x04 /* -Fw Mode/SubMode Mask -|-----------------------------------------------------------------------------| -| SUB | SUB | SUB | SUB | | | | | -|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]| -| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | -|-----------------------------------------------------------------------------| -*/ + * Fw Mode/SubMode Mask + *----------------------------------------------------------------------------- + * SUB | SUB | SUB | SUB | | | | + *MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0] + * (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) + *----------------------------------------------------------------------------- + */ #define HI_OPTION_FW_MODE_BITS 0x2 #define HI_OPTION_FW_MODE_MASK 0x3 #define HI_OPTION_FW_MODE_SHIFT 0xC @@ -284,7 +293,6 @@ Fw Mode/SubMode Mask #define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00 #define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8 - /* hi_option_flag2 options */ #define HI_OPTION_OFFLOAD_AMSDU 0x01 #define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */ @@ -318,6 +326,23 @@ Fw Mode/SubMode Mask #define HI_ACS_FLAGS_USE_WWAN (1 << 1) /* Use test VAP */ #define HI_ACS_FLAGS_TEST_VAP (1 << 2) +/* SDIO/mailbox ACS flag definitions */ +#define HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET (1 << 0) +#define HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET (1 << 1) +#define HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE (1 << 2) +#define HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK (1 << 16) +#define HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK (1 << 17) + +/* + * If both SDIO_CRASH_DUMP_ENHANCEMENT_HOST and SDIO_CRASH_DUMP_ENHANCEMENT_FW + * flags are set, then crashdump upload will be done using the BMI host/target + * communication channel. + */ +/* HOST to support using BMI dump FW memory when hit assert */ +#define HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_HOST 0x400 + +/* FW to support using BMI dump FW memory when hit assert */ +#define HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW 0x800 /* * CONSOLE FLAGS @@ -404,7 +429,7 @@ Fw Mode/SubMode Mask * 1. 
target firmware would check magic number and if it's a match, firmware * would consider the bits[0:15] are valid and base on that to calculate * the end of DRAM. Early allocation would be located at that area and - * may be reclaimed when necesary + * may be reclaimed when necessary * 2. if no magic number is found, early allocation would happen at "_end" * symbol of ROM which is located before the app-data and might NOT be * re-claimable. If this is adopted, link script should keep this in @@ -427,8 +452,9 @@ Fw Mode/SubMode Mask #define HI_PWR_SAVE_LPL_ENABLED 0x1 /*b1-b3 reserved*/ /*b4-b5 : dev0 LPL type : 0 - none - 1- Reduce Pwr Search - 2- Reduce Pwr Listen*/ + * 1- Reduce Pwr Search + * 2- Reduce Pwr Listen + */ /*b6-b7 : dev1 LPL type and so on for Max 8 devices*/ #define HI_PWR_SAVE_LPL_DEV0_LSB 4 #define HI_PWR_SAVE_LPL_DEV_MASK 0x3 @@ -437,7 +463,7 @@ Fw Mode/SubMode Mask ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED)) #define HI_DEV_LPL_TYPE_GET(_devix) \ (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \ - (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2))) + (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2))) #define HOST_INTEREST_SMPS_IS_ALLOWED() \ ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK)) @@ -446,4 +472,26 @@ Fw Mode/SubMode Mask #define QCA988X_BOARD_DATA_SZ 7168 #define QCA988X_BOARD_EXT_DATA_SZ 0 +#define QCA9887_BOARD_DATA_SZ 7168 +#define QCA9887_BOARD_EXT_DATA_SZ 0 + +#define QCA6174_BOARD_DATA_SZ 8192 +#define QCA6174_BOARD_EXT_DATA_SZ 0 + +#define QCA9377_BOARD_DATA_SZ QCA6174_BOARD_DATA_SZ +#define QCA9377_BOARD_EXT_DATA_SZ 0 + +#define QCA99X0_BOARD_DATA_SZ 12288 +#define QCA99X0_BOARD_EXT_DATA_SZ 0 + +/* Dual band extended board data */ +#define QCA99X0_EXT_BOARD_DATA_SZ 2048 +#define EXT_BOARD_ADDRESS_OFFSET 0x3000 + +#define QCA4019_BOARD_DATA_SZ 12064 +#define QCA4019_BOARD_EXT_DATA_SZ 0 + +#define WCN3990_BOARD_DATA_SZ 26328 +#define WCN3990_BOARD_EXT_DATA_SZ 0 + #endif /* __TARGADDRS_H__ */ diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c new file mode 100644 index 000000000000..d3bd385694d6 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -0,0 +1,662 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
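+ *
+ * (Orientation note, added: this file implements the nl80211 testmode
+ * hooks - version query, UTF firmware start/stop, and raw WMI/FTM
+ * command passthrough - all dispatched from ath10k_tm_cmd() below.)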
+ */ + +#include "testmode.h" + +#include <net/netlink.h> +#include <linux/firmware.h> + +#include "debug.h" +#include "wmi.h" +#include "wmi-tlv.h" +#include "hif.h" +#include "hw.h" +#include "core.h" + +#include "testmode_i.h" + +#define ATH10K_FTM_SEG_NONE ((u32)-1) +#define ATH10K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0) +#define ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4) + +static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = { + [ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 }, + [ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY, + .len = ATH10K_TM_DATA_MAX_LEN }, + [ATH10K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 }, + [ATH10K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 }, + [ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 }, +}; + +static void ath10k_tm_event_unsegmented(struct ath10k *ar, u32 cmd_id, + struct sk_buff *skb) +{ + struct sk_buff *nl_skb; + int ret; + + nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy, + 2 * sizeof(u32) + skb->len, + GFP_ATOMIC); + if (!nl_skb) { + ath10k_warn(ar, + "failed to allocate skb for testmode wmi event\n"); + return; + } + + ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI); + if (ret) { + ath10k_warn(ar, + "failed to put testmode wmi event cmd attribute: %d\n", + ret); + kfree_skb(nl_skb); + return; + } + + ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id); + if (ret) { + ath10k_warn(ar, + "failed to put testmode wmi event cmd_id: %d\n", + ret); + kfree_skb(nl_skb); + return; + } + + ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data); + if (ret) { + ath10k_warn(ar, + "failed to copy skb to testmode wmi event: %d\n", + ret); + kfree_skb(nl_skb); + return; + } + + cfg80211_testmode_event(nl_skb, GFP_ATOMIC); +} + +static void ath10k_tm_event_segmented(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb) +{ + struct wmi_ftm_cmd *ftm = (struct wmi_ftm_cmd *)skb->data; + u8 total_segments, current_seq; + struct sk_buff *nl_skb; + u8 const *buf_pos; + u16 datalen; + u32 data_pos; + int ret; + + if (skb->len < sizeof(*ftm)) { + ath10k_warn(ar, "Invalid ftm event length: %d\n", skb->len); + return; + } + + current_seq = FIELD_GET(ATH10K_FTM_SEGHDR_CURRENT_SEQ, + __le32_to_cpu(ftm->seg_hdr.segmentinfo)); + total_segments = FIELD_GET(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS, + __le32_to_cpu(ftm->seg_hdr.segmentinfo)); + datalen = skb->len - sizeof(*ftm); + buf_pos = ftm->data; + + if (current_seq == 0) { + ar->testmode.expected_seq = 0; + ar->testmode.data_pos = 0; + } + + data_pos = ar->testmode.data_pos; + + if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) { + ath10k_warn(ar, "Invalid ftm event length at %u: %u\n", + data_pos, datalen); + ret = -EINVAL; + return; + } + + memcpy(&ar->testmode.eventdata[data_pos], buf_pos, datalen); + data_pos += datalen; + + if (++ar->testmode.expected_seq != total_segments) { + ar->testmode.data_pos = data_pos; + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "partial data received %u/%u\n", + current_seq + 1, total_segments); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "total data length %u\n", data_pos); + + nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy, + 2 * sizeof(u32) + data_pos, + GFP_ATOMIC); + if (!nl_skb) { + ath10k_warn(ar, "failed to allocate skb for testmode wmi event\n"); + return; + } + + ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_TLV); + if (ret) { + ath10k_warn(ar, "failed to put testmode wmi event attribute: %d\n", ret); + kfree_skb(nl_skb); + return; + } + + ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, 
cmd_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to put testmode wmi event cmd_id: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, data_pos, &ar->testmode.eventdata[0]);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy skb to testmode wmi event: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+/* Returns true if callee consumes the skb and the skb should be discarded.
+ * Returns false if skb is not used. Does not sleep.
+ */
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ bool consumed;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+ cmd_id, skb, skb->len);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!ar->testmode.utf_monitor) {
+ consumed = false;
+ goto out;
+ }
+
+ /* Only testmode.c should be handling events from utf firmware,
+ * otherwise all sorts of problems will arise as mac80211 operations
+ * are not initialised.
+ */
+ consumed = true;
+
+ if (ar->testmode.expected_seq != ATH10K_FTM_SEG_NONE)
+ ath10k_tm_event_segmented(ar, cmd_id, skb);
+ else
+ ath10k_tm_event_unsegmented(ar, cmd_id, skb);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+
+ return consumed;
+}
+
+static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode cmd get version_major %d version_minor %d\n",
+ ATH10K_TESTMODE_VERSION_MAJOR,
+ ATH10K_TESTMODE_VERSION_MINOR);
+
+ skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
+ nla_total_size(sizeof(u32)));
+ if (!skb)
+ return -ENOMEM;
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR,
+ ATH10K_TESTMODE_VERSION_MAJOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR,
+ ATH10K_TESTMODE_VERSION_MINOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_WMI_OP_VERSION,
+ ar->normal_mode_fw.fw_file.wmi_op_version);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
+{
+ char filename[100];
+ int ret;
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
+
+ /* load utf firmware image */
+ ret = firmware_request_nowarn(&fw_file->firmware, filename, ar->dev);
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode fw request '%s': %d\n",
+ filename, ret);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ /* FW UTF API 1 ("utf.bin") does not advertise firmware features. Do
+ * an ugly hack where we force the firmware features to match with
+ * the 10.1 branch so that wmi.c will use the correct WMI interface.
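+ *
+ * (For reference: ath10k_tm_fetch_firmware() below first requests the
+ * API 2 image named after ATH10K_FW_UTF_FILE_BASE, e.g. "utf-2.bin", or
+ * "utf-sdio-2.bin"/"utf-usb-2.bin" on the SDIO/USB buses, and only falls
+ * back to this legacy path when that lookup fails.)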
+ */ + + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1; + fw_file->firmware_data = fw_file->firmware->data; + fw_file->firmware_len = fw_file->firmware->size; + + return 0; +} + +static int ath10k_tm_fetch_firmware(struct ath10k *ar) +{ + struct ath10k_fw_components *utf_mode_fw; + int ret; + char fw_name[100]; + int fw_api2 = 2; + + switch (ar->hif.bus) { + case ATH10K_BUS_SDIO: + case ATH10K_BUS_USB: + scnprintf(fw_name, sizeof(fw_name), "%s-%s-%d.bin", + ATH10K_FW_UTF_FILE_BASE, ath10k_bus_str(ar->hif.bus), + fw_api2); + break; + default: + scnprintf(fw_name, sizeof(fw_name), "%s-%d.bin", + ATH10K_FW_UTF_FILE_BASE, fw_api2); + break; + } + + ret = ath10k_core_fetch_firmware_api_n(ar, fw_name, + &ar->testmode.utf_mode_fw.fw_file); + if (ret == 0) { + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2"); + goto out; + } + + ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file); + if (ret) { + ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1"); + +out: + utf_mode_fw = &ar->testmode.utf_mode_fw; + + /* Use the same board data file as the normal firmware uses (but + * it's still "owned" by normal_mode_fw so we shouldn't free it. + */ + utf_mode_fw->board_data = ar->normal_mode_fw.board_data; + utf_mode_fw->board_len = ar->normal_mode_fw.board_len; + + if (!utf_mode_fw->fw_file.otp_data) { + ath10k_info(ar, "utf.bin didn't contain otp binary, taking it from the normal mode firmware"); + utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data; + utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len; + } + + return 0; +} + +static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) +{ + const char *ver; + int ret; + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n"); + + mutex_lock(&ar->conf_mutex); + + if (ar->state == ATH10K_STATE_UTF) { + ret = -EALREADY; + goto err; + } + + /* start utf only when the driver is not in use */ + if (ar->state != ATH10K_STATE_OFF) { + ret = -EBUSY; + goto err; + } + + if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) { + /* utf image is already downloaded, it shouldn't be */ + ret = -EEXIST; + goto err; + } + + ret = ath10k_tm_fetch_firmware(ar); + if (ret) { + ath10k_err(ar, "failed to fetch UTF firmware: %d", ret); + goto err; + } + + if (ar->testmode.utf_mode_fw.fw_file.codeswap_data && + ar->testmode.utf_mode_fw.fw_file.codeswap_len) { + ret = ath10k_swap_code_seg_init(ar, + &ar->testmode.utf_mode_fw.fw_file); + if (ret) { + ath10k_warn(ar, + "failed to init utf code swap segment: %d\n", + ret); + goto err_release_utf_mode_fw; + } + } + + spin_lock_bh(&ar->data_lock); + ar->testmode.utf_monitor = true; + spin_unlock_bh(&ar->data_lock); + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n", + ar->testmode.utf_mode_fw.fw_file.wmi_op_version); + + ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_UTF); + if (ret) { + ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret); + ar->state = ATH10K_STATE_OFF; + goto err_release_utf_mode_fw; + } + + ar->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH, GFP_KERNEL); + if (!ar->testmode.eventdata) { + ret = -ENOMEM; + goto err_power_down; + } + + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF, + &ar->testmode.utf_mode_fw); + if (ret) { + ath10k_err(ar, "failed to start core (testmode): %d\n", ret); + ar->state 
= ATH10K_STATE_OFF; + goto err_release_eventdata; + } + + ar->state = ATH10K_STATE_UTF; + + if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0) + ver = ar->testmode.utf_mode_fw.fw_file.fw_version; + else + ver = "API 1"; + + ath10k_info(ar, "UTF firmware %s started\n", ver); + + mutex_unlock(&ar->conf_mutex); + + return 0; + +err_release_eventdata: + kfree(ar->testmode.eventdata); + ar->testmode.eventdata = NULL; + +err_power_down: + ath10k_hif_power_down(ar); + +err_release_utf_mode_fw: + if (ar->testmode.utf_mode_fw.fw_file.codeswap_data && + ar->testmode.utf_mode_fw.fw_file.codeswap_len) + ath10k_swap_code_seg_release(ar, + &ar->testmode.utf_mode_fw.fw_file); + + release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware); + ar->testmode.utf_mode_fw.fw_file.firmware = NULL; + +err: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar) +{ + lockdep_assert_held(&ar->conf_mutex); + + ath10k_core_stop(ar); + ath10k_hif_power_down(ar); + + spin_lock_bh(&ar->data_lock); + + ar->testmode.utf_monitor = false; + + spin_unlock_bh(&ar->data_lock); + + if (ar->testmode.utf_mode_fw.fw_file.codeswap_data && + ar->testmode.utf_mode_fw.fw_file.codeswap_len) + ath10k_swap_code_seg_release(ar, + &ar->testmode.utf_mode_fw.fw_file); + + release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware); + ar->testmode.utf_mode_fw.fw_file.firmware = NULL; + + kfree(ar->testmode.eventdata); + ar->testmode.eventdata = NULL; + + ar->state = ATH10K_STATE_OFF; +} + +static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[]) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n"); + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto out; + } + + __ath10k_tm_cmd_utf_stop(ar); + + ret = 0; + + ath10k_info(ar, "UTF firmware stopped\n"); + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[]) +{ + struct sk_buff *skb; + int ret, buf_len; + u32 cmd_id; + void *buf; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto out; + } + + if (!tb[ATH10K_TM_ATTR_DATA]) { + ret = -EINVAL; + goto out; + } + + if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) { + ret = -EINVAL; + goto out; + } + + buf = nla_data(tb[ATH10K_TM_ATTR_DATA]); + buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]); + cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]); + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, + "testmode cmd wmi cmd_id %d buf %p buf_len %d\n", + cmd_id, buf, buf_len); + + ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len); + + skb = ath10k_wmi_alloc_skb(ar, buf_len); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + memcpy(skb->data, buf, buf_len); + + ret = ath10k_wmi_cmd_send(ar, skb, cmd_id); + if (ret) { + ath10k_warn(ar, "failed to transmit wmi command (testmode): %d\n", + ret); + goto out; + } + + ret = 0; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath10k_tm_cmd_tlv(struct ath10k *ar, struct nlattr *tb[]) +{ + u16 total_bytes, num_segments; + u32 cmd_id, buf_len; + u8 segnumber = 0; + u8 *bufpos; + void *buf; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto out; + } + + buf = nla_data(tb[ATH10K_TM_ATTR_DATA]); + buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]); + cmd_id = WMI_PDEV_UTF_CMDID; + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, + "cmd wmi ftm cmd_id %d buffer length %d\n", + cmd_id, buf_len); + 
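/* Illustrative numbers, assuming MAX_WMI_UTF_LEN from wmi-tlv.h is 252:
+ * a 600-byte buffer gives num_segments = 3 below and goes out as three
+ * WMI_PDEV_UTF_CMDID commands carrying 252, 252 and 96 payload bytes,
+ * each wrapped in the TLV and segment headers built in the loop.
+ */
+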
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len); + + bufpos = buf; + total_bytes = buf_len; + num_segments = total_bytes / MAX_WMI_UTF_LEN; + ar->testmode.expected_seq = 0; + + if (buf_len - (num_segments * MAX_WMI_UTF_LEN)) + num_segments++; + + while (buf_len) { + u16 chunk_len = min_t(u16, buf_len, MAX_WMI_UTF_LEN); + struct wmi_ftm_cmd *ftm_cmd; + struct sk_buff *skb; + u32 hdr_info; + u8 seginfo; + + skb = ath10k_wmi_alloc_skb(ar, (chunk_len + + sizeof(struct wmi_ftm_cmd))); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + ftm_cmd = (struct wmi_ftm_cmd *)skb->data; + hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TLV_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, (chunk_len + + sizeof(struct wmi_ftm_seg_hdr))); + ftm_cmd->tlv_header = __cpu_to_le32(hdr_info); + ftm_cmd->seg_hdr.len = __cpu_to_le32(total_bytes); + ftm_cmd->seg_hdr.msgref = __cpu_to_le32(ar->testmode.ftm_msgref); + seginfo = FIELD_PREP(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) | + FIELD_PREP(ATH10K_FTM_SEGHDR_CURRENT_SEQ, segnumber); + ftm_cmd->seg_hdr.segmentinfo = __cpu_to_le32(seginfo); + segnumber++; + + memcpy(&ftm_cmd->data, bufpos, chunk_len); + + ret = ath10k_wmi_cmd_send(ar, skb, cmd_id); + if (ret) { + ath10k_warn(ar, "failed to send wmi ftm command: %d\n", ret); + goto out; + } + + buf_len -= chunk_len; + bufpos += chunk_len; + } + + ar->testmode.ftm_msgref++; + ret = 0; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len) +{ + struct ath10k *ar = hw->priv; + struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1]; + int ret; + + ret = nla_parse_deprecated(tb, ATH10K_TM_ATTR_MAX, data, len, + ath10k_tm_policy, NULL); + if (ret) + return ret; + + if (!tb[ATH10K_TM_ATTR_CMD]) + return -EINVAL; + + ar->testmode.expected_seq = ATH10K_FTM_SEG_NONE; + + switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) { + case ATH10K_TM_CMD_GET_VERSION: + if (!tb[ATH10K_TM_ATTR_DATA]) + return ath10k_tm_cmd_get_version(ar, tb); + else /* ATH10K_TM_CMD_TLV */ + return ath10k_tm_cmd_tlv(ar, tb); + case ATH10K_TM_CMD_UTF_START: + return ath10k_tm_cmd_utf_start(ar, tb); + case ATH10K_TM_CMD_UTF_STOP: + return ath10k_tm_cmd_utf_stop(ar, tb); + case ATH10K_TM_CMD_WMI: + return ath10k_tm_cmd_wmi(ar, tb); + default: + return -EOPNOTSUPP; + } +} + +void ath10k_testmode_destroy(struct ath10k *ar) +{ + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_UTF) { + /* utf firmware is not running, nothing to do */ + goto out; + } + + __ath10k_tm_cmd_utf_stop(ar); + +out: + mutex_unlock(&ar->conf_mutex); +} diff --git a/drivers/net/wireless/ath/ath10k/testmode.h b/drivers/net/wireless/ath/ath10k/testmode.h new file mode 100644 index 000000000000..6488fd514ae3 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/testmode.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. 
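+ *
+ * (Note: when CONFIG_NL80211_TESTMODE is not set, the declarations below
+ * fall back to no-op inline stubs, so callers elsewhere in the driver
+ * can invoke them unconditionally.)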
+ */ + +#include "core.h" + +#ifdef CONFIG_NL80211_TESTMODE + +void ath10k_testmode_destroy(struct ath10k *ar); + +bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb); +int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); + +#else + +static inline void ath10k_testmode_destroy(struct ath10k *ar) +{ +} + +static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, + struct sk_buff *skb) +{ + return false; +} + +static inline int ath10k_tm_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len) +{ + return 0; +} + +#endif diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h new file mode 100644 index 000000000000..1603f5276682 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/testmode_i.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +/* "API" level of the ath10k testmode interface. Bump it after every + * incompatible interface change. + */ +#define ATH10K_TESTMODE_VERSION_MAJOR 1 + +/* Bump this after every _compatible_ interface change, for example + * addition of a new command or an attribute. + */ +#define ATH10K_TESTMODE_VERSION_MINOR 0 + +#define ATH10K_TM_DATA_MAX_LEN 5000 +#define ATH_FTM_EVENT_MAX_BUF_LENGTH 2048 + +enum ath10k_tm_attr { + __ATH10K_TM_ATTR_INVALID = 0, + ATH10K_TM_ATTR_CMD = 1, + ATH10K_TM_ATTR_DATA = 2, + ATH10K_TM_ATTR_WMI_CMDID = 3, + ATH10K_TM_ATTR_VERSION_MAJOR = 4, + ATH10K_TM_ATTR_VERSION_MINOR = 5, + ATH10K_TM_ATTR_WMI_OP_VERSION = 6, + + /* keep last */ + __ATH10K_TM_ATTR_AFTER_LAST, + ATH10K_TM_ATTR_MAX = __ATH10K_TM_ATTR_AFTER_LAST - 1, +}; + +/* All ath10k testmode interface commands specified in + * ATH10K_TM_ATTR_CMD + */ +enum ath10k_tm_cmd { + /* Returns the supported ath10k testmode interface version in + * ATH10K_TM_ATTR_VERSION. Always guaranteed to work. User space + * uses this to verify it's using the correct version of the + * testmode interface + */ + ATH10K_TM_CMD_GET_VERSION = 0, + + /* Boots the UTF firmware, the netdev interface must be down at the + * time. + */ + ATH10K_TM_CMD_UTF_START = 1, + + /* Shuts down the UTF firmware and puts the driver back into OFF + * state. + */ + ATH10K_TM_CMD_UTF_STOP = 2, + + /* The command used to transmit a WMI command to the firmware and + * the event to receive WMI events from the firmware. Without + * struct wmi_cmd_hdr header, only the WMI payload. Command id is + * provided with ATH10K_TM_ATTR_WMI_CMDID and payload in + * ATH10K_TM_ATTR_DATA. + */ + ATH10K_TM_CMD_WMI = 3, + + /* The command used to transmit a test command to the firmware + * and the event to receive test events from the firmware. The data + * received only contain the TLV payload, need to add the tlv header + * and send the cmd to firmware with command id WMI_PDEV_UTF_CMDID. + * The data payload size could be large and the driver needs to + * send segmented data to firmware. + * + * This legacy testmode command shares the same value as the get-version + * command. To distinguish between them, we check whether the data attribute + * is present. 
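+ *
+ * For example, ath10k_tm_cmd() serves a request carrying command 0 and
+ * no ATH10K_TM_ATTR_DATA via ath10k_tm_cmd_get_version(), while command
+ * 0 plus a data attribute is routed to ath10k_tm_cmd_tlv().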
+ */ + ATH10K_TM_CMD_TLV = ATH10K_TM_CMD_GET_VERSION, +}; diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c new file mode 100644 index 000000000000..8b15ec07b107 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/thermal.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/thermal.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include "core.h" +#include "debug.h" +#include "wmi-ops.h" + +static int +ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = ATH10K_THERMAL_THROTTLE_MAX; + + return 0; +} + +static int +ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct ath10k *ar = cdev->devdata; + + mutex_lock(&ar->conf_mutex); + *state = ar->thermal.throttle_state; + mutex_unlock(&ar->conf_mutex); + + return 0; +} + +static int +ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev, + unsigned long throttle_state) +{ + struct ath10k *ar = cdev->devdata; + + if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) { + ath10k_warn(ar, "throttle state %ld is exceeding the limit %d\n", + throttle_state, ATH10K_THERMAL_THROTTLE_MAX); + return -EINVAL; + } + mutex_lock(&ar->conf_mutex); + ar->thermal.throttle_state = throttle_state; + ath10k_thermal_set_throttling(ar); + mutex_unlock(&ar->conf_mutex); + return 0; +} + +static const struct thermal_cooling_device_ops ath10k_thermal_ops = { + .get_max_state = ath10k_thermal_get_max_throttle_state, + .get_cur_state = ath10k_thermal_get_cur_throttle_state, + .set_cur_state = ath10k_thermal_set_cur_throttle_state, +}; + +static ssize_t ath10k_thermal_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ath10k *ar = dev_get_drvdata(dev); + int ret, temperature; + unsigned long time_left; + + mutex_lock(&ar->conf_mutex); + + /* Can't get temperature when the card is off */ + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto out; + } + + reinit_completion(&ar->thermal.wmi_sync); + ret = ath10k_wmi_pdev_get_temperature(ar); + if (ret) { + ath10k_warn(ar, "failed to read temperature %d\n", ret); + goto out; + } + + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) { + ret = -ESHUTDOWN; + goto out; + } + + time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync, + ATH10K_THERMAL_SYNC_TIMEOUT_HZ); + if (!time_left) { + ath10k_warn(ar, "failed to synchronize thermal read\n"); + ret = -ETIMEDOUT; + goto out; + } + + spin_lock_bh(&ar->data_lock); + temperature = ar->thermal.temperature; + spin_unlock_bh(&ar->data_lock); + + /* display in millidegree celsius */ + ret = sysfs_emit(buf, "%d\n", temperature * 1000); +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature) +{ + spin_lock_bh(&ar->data_lock); + ar->thermal.temperature = temperature; + spin_unlock_bh(&ar->data_lock); + complete(&ar->thermal.wmi_sync); +} + +static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath10k_thermal_show_temp, + NULL, 0); + +static struct attribute *ath10k_hwmon_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + NULL, +}; +ATTRIBUTE_GROUPS(ath10k_hwmon); + +void ath10k_thermal_set_throttling(struct ath10k *ar) +{ + u32 period, duration, enabled; + int 
ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return;
+
+ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+ return;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return;
+
+ period = ar->thermal.quiet_period;
+ duration = (period * ar->thermal.throttle_state) / 100;
+ enabled = duration ? 1 : 0;
+
+ ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+ ATH10K_QUIET_START_OFFSET,
+ enabled);
+ if (ret) {
+ ath10k_warn(ar, "failed to set quiet mode period %u duration %u enabled %u ret %d\n",
+ period, duration, enabled, ret);
+ }
+}
+
+int ath10k_thermal_register(struct ath10k *ar)
+{
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon_dev;
+ int ret;
+
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return 0;
+
+ cdev = thermal_cooling_device_register("ath10k_thermal", ar,
+ &ath10k_thermal_ops);
+
+ if (IS_ERR(cdev)) {
+ ath10k_err(ar, "failed to setup thermal device result: %ld\n",
+ PTR_ERR(cdev));
+ return -EINVAL;
+ }
+
+ ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
+ "cooling_device");
+ if (ret) {
+ ath10k_err(ar, "failed to create cooling device symlink\n");
+ goto err_cooling_destroy;
+ }
+
+ ar->thermal.cdev = cdev;
+ ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
+
+ /* Do not register hwmon device when temperature reading is not
+ * supported by firmware
+ */
+ if (!(ar->wmi.ops->gen_pdev_get_temperature))
+ return 0;
+
+ /* Avoid linking error on devm_hwmon_device_register_with_groups, I
+ * guess linux/hwmon.h is missing proper stubs.
+ */
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
+ "ath10k_hwmon", ar,
+ ath10k_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ath10k_err(ar, "failed to register hwmon device: %ld\n",
+ PTR_ERR(hwmon_dev));
+ ret = -EINVAL;
+ goto err_remove_link;
+ }
+ return 0;
+
+err_remove_link:
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+err_cooling_destroy:
+ thermal_cooling_device_unregister(cdev);
+ return ret;
+}
+
+void ath10k_thermal_unregister(struct ath10k *ar)
+{
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return;
+
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+ thermal_cooling_device_unregister(ar->thermal.cdev);
+}
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
new file mode 100644
index 000000000000..1f4de9fbf2b3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
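+ *
+ * (Throttling arithmetic from thermal.c above, illustrated: with the
+ * default quiet_period of 100 and a requested cooling state of 40, the
+ * driver asks firmware for duration = 100 * 40 / 100 = 40, i.e. a 40%
+ * quiet duty cycle; state 0 yields duration 0 and disables quiet mode.)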
+ */ +#ifndef _THERMAL_ +#define _THERMAL_ + +#define ATH10K_QUIET_PERIOD_DEFAULT 100 +#define ATH10K_QUIET_PERIOD_MIN 25 +#define ATH10K_QUIET_START_OFFSET 10 +#define ATH10K_HWMON_NAME_LEN 15 +#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ) +#define ATH10K_THERMAL_THROTTLE_MAX 100 + +struct ath10k_thermal { + struct thermal_cooling_device *cdev; + struct completion wmi_sync; + + /* protected by conf_mutex */ + u32 throttle_state; + u32 quiet_period; + /* temperature value in Celsius degree + * protected by data_lock + */ + int temperature; +}; + +#if IS_REACHABLE(CONFIG_THERMAL) +int ath10k_thermal_register(struct ath10k *ar); +void ath10k_thermal_unregister(struct ath10k *ar); +void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature); +void ath10k_thermal_set_throttling(struct ath10k *ar); +#else +static inline int ath10k_thermal_register(struct ath10k *ar) +{ + return 0; +} + +static inline void ath10k_thermal_unregister(struct ath10k *ar) +{ +} + +static inline void ath10k_thermal_event_temperature(struct ath10k *ar, + int temperature) +{ +} + +static inline void ath10k_thermal_set_throttling(struct ath10k *ar) +{ +} + +#endif +#endif /* _THERMAL_ */ diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c index 4a31e2c6fbd4..421ec47c59bd 100644 --- a/drivers/net/wireless/ath/ath10k/trace.c +++ b/drivers/net/wireless/ath/ath10k/trace.c @@ -1,20 +1,12 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2012 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include <linux/export.h> #include <linux/module.h> #define CREATE_TRACE_POINTS #include "trace.h" +EXPORT_SYMBOL(__tracepoint_ath10k_log_dbg); diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index 85e806bf7257..68b78ca17eaa 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -1,23 +1,27 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. */ #if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #include <linux/tracepoint.h> +#include "core.h" + +#if !defined(_TRACE_H_) +static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len) +{ + const struct ieee80211_hdr *hdr = buf; + + /* In some rare cases (e.g. fcs error) device reports frame buffer + * shorter than what frame header implies (e.g. len = 0). The buffer + * can still be accessed so do a simple min() to guarantee caller + * doesn't get value greater than len. + */ + return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control)); +} +#endif #define _TRACE_H_ @@ -25,7 +29,11 @@ #if !defined(CONFIG_ATH10K_TRACING) #undef TRACE_EVENT #define TRACE_EVENT(name, proto, ...) \ -static inline void trace_ ## name(proto) {} +static inline void trace_ ## name(proto) {} \ +static inline bool trace_##name##_enabled(void) \ +{ \ + return false; \ +} #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(...) #undef DEFINE_EVENT @@ -36,62 +44,76 @@ static inline void trace_ ## name(proto) {} #undef TRACE_SYSTEM #define TRACE_SYSTEM ath10k -#define ATH10K_MSG_MAX 200 +#define ATH10K_MSG_MAX 400 DECLARE_EVENT_CLASS(ath10k_log_event, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf), + TP_PROTO(struct ath10k *ar, struct va_format *vaf), + TP_ARGS(ar, vaf), TP_STRUCT__entry( - __dynamic_array(char, msg, ATH10K_MSG_MAX) + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __vstring(msg, vaf->fmt, vaf->va) ), TP_fast_assign( - WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), - ATH10K_MSG_MAX, - vaf->fmt, - *vaf->va) >= ATH10K_MSG_MAX); + __assign_str(device); + __assign_str(driver); + __assign_vstr(msg, vaf->fmt, vaf->va); ), - TP_printk("%s", __get_str(msg)) + TP_printk( + "%s %s %s", + __get_str(driver), + __get_str(device), + __get_str(msg) + ) ); DEFINE_EVENT(ath10k_log_event, ath10k_log_err, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) + TP_PROTO(struct ath10k *ar, struct va_format *vaf), + TP_ARGS(ar, vaf) ); DEFINE_EVENT(ath10k_log_event, ath10k_log_warn, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) + TP_PROTO(struct ath10k *ar, struct va_format *vaf), + TP_ARGS(ar, vaf) ); DEFINE_EVENT(ath10k_log_event, ath10k_log_info, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) + TP_PROTO(struct ath10k *ar, struct va_format *vaf), + TP_ARGS(ar, vaf) ); TRACE_EVENT(ath10k_log_dbg, - TP_PROTO(unsigned int level, struct va_format *vaf), - TP_ARGS(level, vaf), + TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf), + TP_ARGS(ar, level, vaf), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __field(unsigned int, level) - __dynamic_array(char, msg, ATH10K_MSG_MAX) + __vstring(msg, vaf->fmt, vaf->va) ), TP_fast_assign( + __assign_str(device); + __assign_str(driver); __entry->level = level; - WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), - ATH10K_MSG_MAX, - vaf->fmt, - *vaf->va) >= ATH10K_MSG_MAX); + __assign_vstr(msg, vaf->fmt, vaf->va); ), - TP_printk("%s", __get_str(msg)) + TP_printk( + "%s %s %s", + __get_str(driver), + __get_str(device), + __get_str(msg) + ) ); 
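
The trace_<name>_enabled() stub added above for !CONFIG_ATH10K_TRACING
builds lets callers cheaply test whether a tracepoint is live before
doing any formatting work. A minimal sketch of the intended use, loosely
modeled on the ath10k_dbg() macro and assuming the __ath10k_dbg() helper
and ath10k_debug_mask from debug.h:

#define dbg_sketch(ar, dbg_mask, fmt, ...)				\
do {									\
	/* skip the varargs work unless someone is listening */	\
	if ((ath10k_debug_mask & (dbg_mask)) ||				\
	    trace_ath10k_log_dbg_enabled())				\
		__ath10k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__);		\
} while (0)
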
TRACE_EVENT(ath10k_log_dbg_dump, - TP_PROTO(const char *msg, const char *prefix, + TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix, const void *buf, size_t buf_len), - TP_ARGS(msg, prefix, buf, buf_len), + TP_ARGS(ar, msg, prefix, buf, buf_len), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __string(msg, msg) __string(prefix, prefix) __field(size_t, buf_len) @@ -99,65 +121,404 @@ TRACE_EVENT(ath10k_log_dbg_dump, ), TP_fast_assign( - __assign_str(msg, msg); - __assign_str(prefix, prefix); + __assign_str(device); + __assign_str(driver); + __assign_str(msg); + __assign_str(prefix); __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "%s/%s\n", __get_str(prefix), __get_str(msg) + "%s %s %s/%s\n", + __get_str(driver), + __get_str(device), + __get_str(prefix), + __get_str(msg) ) ); TRACE_EVENT(ath10k_wmi_cmd, - TP_PROTO(int id, void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len), - TP_ARGS(id, buf, buf_len), + TP_ARGS(ar, id, buf, buf_len), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __field(unsigned int, id) __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) ), TP_fast_assign( + __assign_str(device); + __assign_str(driver); __entry->id = id; __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "id %d len %zu", + "%s %s id %d len %zu", + __get_str(driver), + __get_str(device), __entry->id, __entry->buf_len ) ); TRACE_EVENT(ath10k_wmi_event, - TP_PROTO(int id, void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len), - TP_ARGS(id, buf, buf_len), + TP_ARGS(ar, id, buf, buf_len), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __field(unsigned int, id) __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) ), TP_fast_assign( + __assign_str(device); + __assign_str(driver); __entry->id = id; __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "id %d len %zu", + "%s %s id %d len %zu", + __get_str(driver), + __get_str(device), __entry->id, __entry->buf_len ) ); +TRACE_EVENT(ath10k_htt_stats, + TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len), + + TP_ARGS(ar, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "%s %s len %zu", + __get_str(driver), + __get_str(device), + __entry->buf_len + ) +); + +TRACE_EVENT(ath10k_wmi_dbglog, + TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len), + + TP_ARGS(ar, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u8, hw_type) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->hw_type = ar->hw_rev; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "%s %s %d len %zu", + __get_str(driver), + __get_str(device), + __entry->hw_type, + __entry->buf_len + ) +); + +TRACE_EVENT(ath10k_htt_pktlog, + TP_PROTO(struct ath10k *ar, const 
void *buf, u16 buf_len), + + TP_ARGS(ar, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u8, hw_type) + __field(u16, buf_len) + __dynamic_array(u8, pktlog, buf_len) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->hw_type = ar->hw_rev; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(pktlog), buf, buf_len); + ), + + TP_printk( + "%s %s %d size %u", + __get_str(driver), + __get_str(device), + __entry->hw_type, + __entry->buf_len + ) +); + +TRACE_EVENT(ath10k_htt_tx, + TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len, + u8 vdev_id, u8 tid), + + TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, msdu_id) + __field(u16, msdu_len) + __field(u8, vdev_id) + __field(u8, tid) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->msdu_id = msdu_id; + __entry->msdu_len = msdu_len; + __entry->vdev_id = vdev_id; + __entry->tid = tid; + ), + + TP_printk( + "%s %s msdu_id %d msdu_len %d vdev_id %d tid %d", + __get_str(driver), + __get_str(device), + __entry->msdu_id, + __entry->msdu_len, + __entry->vdev_id, + __entry->tid + ) +); + +TRACE_EVENT(ath10k_txrx_tx_unref, + TP_PROTO(struct ath10k *ar, u16 msdu_id), + + TP_ARGS(ar, msdu_id), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, msdu_id) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->msdu_id = msdu_id; + ), + + TP_printk( + "%s %s msdu_id %d", + __get_str(driver), + __get_str(device), + __entry->msdu_id + ) +); + +DECLARE_EVENT_CLASS(ath10k_hdr_event, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + + TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(size_t, len) + __dynamic_array(u8, data, ath10k_frm_hdr_len(data, len)) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->len = ath10k_frm_hdr_len(data, len); + memcpy(__get_dynamic_array(data), data, __entry->len); + ), + + TP_printk( + "%s %s len %zu\n", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + +DECLARE_EVENT_CLASS(ath10k_payload_event, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + + TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(size_t, len) + __dynamic_array(u8, payload, (len - + ath10k_frm_hdr_len(data, len))) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->len = len - ath10k_frm_hdr_len(data, len); + memcpy(__get_dynamic_array(payload), + data + ath10k_frm_hdr_len(data, len), __entry->len); + ), + + TP_printk( + "%s %s len %zu\n", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + +DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload, + TP_PROTO(struct ath10k *ar, const void 
*data, size_t len), + TP_ARGS(ar, data, len) +); + +TRACE_EVENT(ath10k_htt_rx_desc, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + + TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u8, hw_type) + __field(u16, len) + __dynamic_array(u8, rxdesc, len) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->hw_type = ar->hw_rev; + __entry->len = len; + memcpy(__get_dynamic_array(rxdesc), data, len); + ), + + TP_printk( + "%s %s %d rxdesc len %d", + __get_str(driver), + __get_str(device), + __entry->hw_type, + __entry->len + ) +); + +TRACE_EVENT(ath10k_wmi_diag_container, + TP_PROTO(struct ath10k *ar, + u8 type, + u32 timestamp, + u32 code, + u16 len, + const void *data), + + TP_ARGS(ar, type, timestamp, code, len, data), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u8, type) + __field(u32, timestamp) + __field(u32, code) + __field(u16, len) + __dynamic_array(u8, data, len) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->type = type; + __entry->timestamp = timestamp; + __entry->code = code; + __entry->len = len; + memcpy(__get_dynamic_array(data), data, len); + ), + + TP_printk( + "%s %s diag container type %u timestamp %u code %u len %d", + __get_str(driver), + __get_str(device), + __entry->type, + __entry->timestamp, + __entry->code, + __entry->len + ) +); + +TRACE_EVENT(ath10k_wmi_diag, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + + TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, len) + __dynamic_array(u8, data, len) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->len = len; + memcpy(__get_dynamic_array(data), data, len); + ), + + TP_printk( + "%s %s tlv diag len %d", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ /* we don't want to use include/trace/events */ diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 68b6faefd1d8..493bfb410aff 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -1,18 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
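+ *
+ * (An added note on the hdr/payload trace split in trace.h above: both
+ * trace_ath10k_tx_hdr() and trace_ath10k_tx_payload() are passed the
+ * whole frame, and the event classes slice it at
+ * ath10k_frm_hdr_len(data, len), so a capture tool can concatenate the
+ * two records to rebuild the original frame.)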
*/ #include "core.h" @@ -23,278 +13,138 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb) { - if (!ATH10K_SKB_CB(skb)->htt.is_offchan) + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))) + return; + + if (ath10k_mac_tx_frm_has_freq(ar)) return; /* If the original wait_for_completion() timed out before * {data,mgmt}_tx_completed() was called then we could complete * offchan_tx_completed for a different skb. Prevent this by using - * offchan_tx_skb. */ + * offchan_tx_skb. + */ spin_lock_bh(&ar->data_lock); if (ar->offchan_tx_skb != skb) { - ath10k_warn("completed old offchannel frame\n"); + ath10k_warn(ar, "completed old offchannel frame\n"); goto out; } complete(&ar->offchan_tx_completed); ar->offchan_tx_skb = NULL; /* just for sanity */ - ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb); + ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb); out: spin_unlock_bh(&ar->data_lock); } -void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc) +int ath10k_txrx_tx_unref(struct ath10k_htt *htt, + const struct htt_tx_done *tx_done) { - struct device *dev = htt->ar->dev; + struct ieee80211_tx_status status; + struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; struct ieee80211_tx_info *info; - struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag; - struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu; - int ret; + struct ieee80211_txq *txq; + struct ath10k_skb_cb *skb_cb; + struct ath10k_txq *artxq; + struct sk_buff *msdu; + u8 flags; - if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0) - return; + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt tx completion msdu_id %u status %d\n", + tx_done->msdu_id, tx_done->status); - ATH10K_SKB_CB(txdesc)->htt.refcount--; + if (tx_done->msdu_id >= htt->max_num_pending_tx) { + ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n", + tx_done->msdu_id); + return -EINVAL; + } - if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) - return; + spin_lock_bh(&htt->tx_lock); + msdu = idr_find(&htt->pending_tx, tx_done->msdu_id); + if (!msdu) { + ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n", + tx_done->msdu_id); + spin_unlock_bh(&htt->tx_lock); + return -ENOENT; + } - if (txfrag) { - ret = ath10k_skb_unmap(dev, txfrag); - if (ret) - ath10k_warn("txfrag unmap failed (%d)\n", ret); + skb_cb = ATH10K_SKB_CB(msdu); + txq = skb_cb->txq; - dev_kfree_skb_any(txfrag); + if (txq) { + artxq = (void *)txq->drv_priv; + artxq->num_fw_queued--; } - ret = ath10k_skb_unmap(dev, msdu); - if (ret) - ath10k_warn("data skb unmap failed (%d)\n", ret); + flags = skb_cb->flags; + ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); + ath10k_htt_tx_dec_pending(htt); + spin_unlock_bh(&htt->tx_lock); + + rcu_read_lock(); + if (txq && txq->sta && skb_cb->airtime_est) + ieee80211_sta_register_airtime(txq->sta, txq->tid, + skb_cb->airtime_est, 0); + rcu_read_unlock(); + + if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) + dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); ath10k_report_offchan_tx(htt->ar, msdu); info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); + info->status.rates[0].idx = -1; - if (ATH10K_SKB_CB(txdesc)->htt.discard) { - ieee80211_free_txskb(htt->ar->hw, msdu); - goto exit; - } + trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id); - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && + !(flags & ATH10K_SKB_F_NOACK_TID)) info->flags |= 
IEEE80211_TX_STAT_ACK; - if (ATH10K_SKB_CB(txdesc)->htt.no_ack) + if (tx_done->status == HTT_TX_COMPL_STATE_NOACK) info->flags &= ~IEEE80211_TX_STAT_ACK; - ieee80211_tx_status(htt->ar->hw, msdu); - /* we do not own the msdu anymore */ - -exit: - spin_lock_bh(&htt->tx_lock); - htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL; - ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id); - __ath10k_htt_tx_dec_pending(htt); - if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx)) - wake_up(&htt->empty_tx_wq); - spin_unlock_bh(&htt->tx_lock); - - dev_kfree_skb_any(txdesc); -} - -void ath10k_txrx_tx_completed(struct ath10k_htt *htt, - const struct htt_tx_done *tx_done) -{ - struct sk_buff *txdesc; - - ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", - tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); - - if (tx_done->msdu_id >= htt->max_num_pending_tx) { - ath10k_warn("warning: msdu_id %d too big, ignoring\n", - tx_done->msdu_id); - return; + if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) && + ((info->flags & IEEE80211_TX_CTL_NO_ACK) || + (flags & ATH10K_SKB_F_NOACK_TID))) + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + + if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) { + if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || + (flags & ATH10K_SKB_F_NOACK_TID)) + info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED; + else + info->flags &= ~IEEE80211_TX_STAT_ACK; } - txdesc = htt->pending_tx[tx_done->msdu_id]; - - ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard; - ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack; - - ath10k_txrx_tx_unref(htt, txdesc); -} - -static const u8 rx_legacy_rate_idx[] = { - 3, /* 0x00 - 11Mbps */ - 2, /* 0x01 - 5.5Mbps */ - 1, /* 0x02 - 2Mbps */ - 0, /* 0x03 - 1Mbps */ - 3, /* 0x04 - 11Mbps */ - 2, /* 0x05 - 5.5Mbps */ - 1, /* 0x06 - 2Mbps */ - 0, /* 0x07 - 1Mbps */ - 10, /* 0x08 - 48Mbps */ - 8, /* 0x09 - 24Mbps */ - 6, /* 0x0A - 12Mbps */ - 4, /* 0x0B - 6Mbps */ - 11, /* 0x0C - 54Mbps */ - 9, /* 0x0D - 36Mbps */ - 7, /* 0x0E - 18Mbps */ - 5, /* 0x0F - 9Mbps */ -}; - -static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info, - enum ieee80211_band band, - struct ieee80211_rx_status *status) -{ - u8 cck, rate, rate_idx, bw, sgi, mcs, nss; - u8 info0 = info->rate.info0; - u32 info1 = info->rate.info1; - u32 info2 = info->rate.info2; - u8 preamble = 0; - - /* Check if valid fields */ - if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID)) - return; - - preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE); - - switch (preamble) { - case HTT_RX_LEGACY: - cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK; - rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE); - rate_idx = 0; - - if (rate < 0x08 || rate > 0x0F) - break; - - switch (band) { - case IEEE80211_BAND_2GHZ: - if (cck) - rate &= ~BIT(3); - rate_idx = rx_legacy_rate_idx[rate]; - break; - case IEEE80211_BAND_5GHZ: - rate_idx = rx_legacy_rate_idx[rate]; - /* We are using same rate table registering - HW - ath10k_rates[]. 
In case of 5GHz skip - CCK rates, so -4 here */ - rate_idx -= 4; - break; - default: - break; - } - - status->rate_idx = rate_idx; - break; - case HTT_RX_HT: - case HTT_RX_HT_WITH_TXBF: - /* HT-SIG - Table 20-11 in info1 and info2 */ - mcs = info1 & 0x1F; - nss = mcs >> 3; - bw = (info1 >> 7) & 1; - sgi = (info2 >> 7) & 1; - - status->rate_idx = mcs; - status->flag |= RX_FLAG_HT; - if (sgi) - status->flag |= RX_FLAG_SHORT_GI; - if (bw) - status->flag |= RX_FLAG_40MHZ; - break; - case HTT_RX_VHT: - case HTT_RX_VHT_WITH_TXBF: - /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2 - TODO check this */ - mcs = (info2 >> 4) & 0x0F; - nss = (info1 >> 10) & 0x07; - bw = info1 & 3; - sgi = info2 & 1; - - status->rate_idx = mcs; - status->vht_nss = nss; - - if (sgi) - status->flag |= RX_FLAG_SHORT_GI; - - switch (bw) { - /* 20MHZ */ - case 0: - break; - /* 40MHZ */ - case 1: - status->flag |= RX_FLAG_40MHZ; - break; - /* 80MHZ */ - case 2: - status->flag |= RX_FLAG_80MHZ; - } - - status->flag |= RX_FLAG_VHT; - break; - default: - break; + if (tx_done->status == HTT_TX_COMPL_STATE_ACK && + tx_done->ack_rssi != ATH10K_INVALID_RSSI) { + info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR + + tx_done->ack_rssi; + info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; } -} -void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info) -{ - struct ieee80211_rx_status *status; - struct ieee80211_channel *ch; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data; - - status = IEEE80211_SKB_RXCB(info->skb); - memset(status, 0, sizeof(*status)); - - if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) { - status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | - RX_FLAG_MMIC_STRIPPED; - hdr->frame_control = __cpu_to_le16( - __le16_to_cpu(hdr->frame_control) & - ~IEEE80211_FCTL_PROTECTED); - } + memset(&status, 0, sizeof(status)); + status.skb = msdu; + status.info = info; - if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) - status->flag |= RX_FLAG_MMIC_ERROR; + rcu_read_lock(); - if (info->fcs_err) - status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (txq) + status.sta = txq->sta; - status->signal = info->signal; + ieee80211_tx_status_ext(htt->ar->hw, &status); - spin_lock_bh(&ar->data_lock); - ch = ar->scan_channel; - if (!ch) - ch = ar->rx_channel; - spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); - if (!ch) { - ath10k_warn("no channel configured; ignoring frame!\n"); - dev_kfree_skb_any(info->skb); - return; - } + /* we do not own the msdu anymore */ - process_rx_rates(ar, info, ch->band, status); - status->band = ch->band; - status->freq = ch->center_freq; - - ath10k_dbg(ATH10K_DBG_DATA, - "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n", - info->skb, - info->skb->len, - status->flag == 0 ? "legacy" : "", - status->flag & RX_FLAG_HT ? "ht" : "", - status->flag & RX_FLAG_VHT ? "vht" : "", - status->flag & RX_FLAG_40MHZ ? "40" : "", - status->flag & RX_FLAG_80MHZ ? "80" : "", - status->flag & RX_FLAG_SHORT_GI ? 
"sgi " : "", - status->rate_idx, - status->vht_nss, - status->freq, - status->band); - - ieee80211_rx(ar->hw, info->skb); + return 0; } struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, @@ -307,7 +157,7 @@ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, list_for_each_entry(peer, &ar->peers, list) { if (peer->vdev_id != vdev_id) continue; - if (memcmp(peer->addr, addr, ETH_ALEN)) + if (!ether_addr_equal(peer->addr, addr)) continue; return peer; @@ -316,11 +166,13 @@ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, return NULL; } -static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, - int peer_id) +struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id) { struct ath10k_peer *peer; + if (peer_id >= BITS_PER_TYPE(peer->peer_ids)) + return NULL; + lockdep_assert_held(&ar->data_lock); list_for_each_entry(peer, &ar->peers, list) @@ -333,19 +185,20 @@ static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, const u8 *addr, bool expect_mapped) { - int ret; + long time_left; - ret = wait_event_timeout(ar->peer_mapping_wq, ({ + time_left = wait_event_timeout(ar->peer_mapping_wq, ({ bool mapped; spin_lock_bh(&ar->data_lock); mapped = !!ath10k_peer_find(ar, vdev_id, addr); spin_unlock_bh(&ar->data_lock); - mapped == expect_mapped; - }), 3*HZ); + (mapped == expect_mapped || + test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)); + }), 3 * HZ); - if (ret <= 0) + if (time_left == 0) return -ETIMEDOUT; return 0; @@ -367,6 +220,13 @@ void ath10k_peer_map_event(struct ath10k_htt *htt, struct ath10k *ar = htt->ar; struct ath10k_peer *peer; + if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) { + ath10k_warn(ar, + "received htt peer map event with idx out of bounds: %u\n", + ev->peer_id); + return; + } + spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr); if (!peer) { @@ -375,14 +235,16 @@ void ath10k_peer_map_event(struct ath10k_htt *htt, goto exit; peer->vdev_id = ev->vdev_id; - memcpy(peer->addr, ev->addr, ETH_ALEN); + ether_addr_copy(peer->addr, ev->addr); list_add(&peer->list, &ar->peers); wake_up(&ar->peer_mapping_wq); } - ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", ev->vdev_id, ev->addr, ev->peer_id); + WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer)); + ar->peer_map[ev->peer_id] = peer; set_bit(ev->peer_id, peer->peer_ids); exit: spin_unlock_bh(&ar->data_lock); @@ -394,16 +256,25 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt, struct ath10k *ar = htt->ar; struct ath10k_peer *peer; + if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) { + ath10k_warn(ar, + "received htt peer unmap event with idx out of bounds: %u\n", + ev->peer_id); + return; + } + spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, ev->peer_id); if (!peer) { - ath10k_warn("unknown peer id %d\n", ev->peer_id); + ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n", + ev->peer_id); goto exit; } - ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", peer->vdev_id, peer->addr, ev->peer_id); + ar->peer_map[ev->peer_id] = NULL; clear_bit(ev->peer_id, peer->peer_ids); if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) { diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h 
index e78632a76df7..ecac441d83a7 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.h +++ b/drivers/net/wireless/ath/ath10k/txrx.h @@ -1,31 +1,19 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc. */ #ifndef _TXRX_H_ #define _TXRX_H_ #include "htt.h" -void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc); -void ath10k_txrx_tx_completed(struct ath10k_htt *htt, - const struct htt_tx_done *tx_done); -void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info); +int ath10k_txrx_tx_unref(struct ath10k_htt *htt, + const struct htt_tx_done *tx_done); struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, const u8 *addr); +struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id); int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr); int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c new file mode 100644 index 000000000000..1732a4f98418 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -0,0 +1,1130 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2007-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. 
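A recurring pattern in the usb.c file added here: both URB completion handlers run in interrupt context, so they only queue the finished skb on a per-pipe list (the skb queue carries its own lock) and schedule a work item; ath10k_usb_io_comp_work() then drains the queue in process context, where the HTC/HTT handlers are safe to run. A distilled sketch of that hand-off, with simplified types and a stand-in dispatch:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct toy_pipe {
	struct sk_buff_head io_comp_queue;
	struct work_struct io_complete_work;
};

/* URB completion context: just queue the skb and defer the real work */
static void toy_complete(struct toy_pipe *pipe, struct sk_buff *skb)
{
	skb_queue_tail(&pipe->io_comp_queue, skb);	/* queue is internally locked */
	schedule_work(&pipe->io_complete_work);
}

/* Worker drains in process context */
static void toy_io_comp_work(struct work_struct *work)
{
	struct toy_pipe *pipe =
		container_of(work, struct toy_pipe, io_complete_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&pipe->io_comp_queue)))
		kfree_skb(skb);		/* stand-in for the tx/rx completion dispatch */
}

static void toy_pipe_init(struct toy_pipe *pipe)
{
	skb_queue_head_init(&pipe->io_comp_queue);
	INIT_WORK(&pipe->io_complete_work, toy_io_comp_work);
}
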
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com> + */ + +#include <linux/module.h> +#include <linux/usb.h> + +#include "debug.h" +#include "core.h" +#include "bmi.h" +#include "hif.h" +#include "htc.h" +#include "usb.h" + +static void ath10k_usb_post_recv_transfers(struct ath10k *ar, + struct ath10k_usb_pipe *recv_pipe); + +/* inlined helper functions */ + +static inline enum ath10k_htc_ep_id +eid_from_htc_hdr(struct ath10k_htc_hdr *htc_hdr) +{ + return (enum ath10k_htc_ep_id)htc_hdr->eid; +} + +static inline bool is_trailer_only_msg(struct ath10k_htc_hdr *htc_hdr) +{ + return __le16_to_cpu(htc_hdr->len) == htc_hdr->trailer_len; +} + +/* pipe/urb operations */ +static struct ath10k_urb_context * +ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe) +{ + struct ath10k_urb_context *urb_context = NULL; + unsigned long flags; + + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return NULL; + + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + if (!list_empty(&pipe->urb_list_head)) { + urb_context = list_first_entry(&pipe->urb_list_head, + struct ath10k_urb_context, link); + list_del(&urb_context->link); + pipe->urb_cnt--; + } + spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); + + return urb_context; +} + +static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe, + struct ath10k_urb_context *urb_context) +{ + unsigned long flags; + + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return; + + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + + pipe->urb_cnt++; + list_add(&urb_context->link, &pipe->urb_list_head); + + spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); +} + +static void ath10k_usb_cleanup_recv_urb(struct ath10k_urb_context *urb_context) +{ + dev_kfree_skb(urb_context->skb); + urb_context->skb = NULL; + + ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); +} + +static void ath10k_usb_free_pipe_resources(struct ath10k *ar, + struct ath10k_usb_pipe *pipe) +{ + struct ath10k_urb_context *urb_context; + + if (!pipe->ar_usb) { + /* nothing allocated for this pipe */ + return; + } + + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb free resources lpipe %d hpipe 0x%x urbs %d avail %d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc, pipe->urb_cnt); + + if (pipe->urb_alloc != pipe->urb_cnt) { + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb urb leak lpipe %d hpipe 0x%x urbs %d avail %d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc, pipe->urb_cnt); + } + + for (;;) { + urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); + + if (!urb_context) + break; + + kfree(urb_context); + } +} + +static void ath10k_usb_cleanup_pipe_resources(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + int i; + + for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) + ath10k_usb_free_pipe_resources(ar, &ar_usb->pipes[i]); +} + +/* hif usb rx/tx completion functions */ + +static void ath10k_usb_recv_complete(struct urb *urb) +{ + struct ath10k_urb_context *urb_context = urb->context; + struct ath10k_usb_pipe *pipe = urb_context->pipe; + struct ath10k *ar = pipe->ar_usb->ar; + struct sk_buff *skb; + int status = 0; + + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb recv pipe %d stat %d len %d urb 0x%p\n", + pipe->logical_pipe_num, urb->status, urb->actual_length, + urb); + + if (urb->status != 0) { + status = -EIO; + switch (urb->status) { + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* no need to spew these errors when device + * removed or urb killed due to driver 
shutdown + */ + status = -ECANCELED; + break; + default: + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb recv pipe %d ep 0x%2.2x failed: %d\n", + pipe->logical_pipe_num, + pipe->ep_address, urb->status); + break; + } + goto cleanup_recv_urb; + } + + if (urb->actual_length == 0) + goto cleanup_recv_urb; + + skb = urb_context->skb; + + /* we are going to pass it up */ + urb_context->skb = NULL; + skb_put(skb, urb->actual_length); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, skb); + schedule_work(&pipe->io_complete_work); + +cleanup_recv_urb: + ath10k_usb_cleanup_recv_urb(urb_context); + + if (status == 0 && + pipe->urb_cnt >= pipe->urb_cnt_thresh) { + /* our free urbs are piling up, post more transfers */ + ath10k_usb_post_recv_transfers(ar, pipe); + } +} + +static void ath10k_usb_transmit_complete(struct urb *urb) +{ + struct ath10k_urb_context *urb_context = urb->context; + struct ath10k_usb_pipe *pipe = urb_context->pipe; + struct ath10k *ar = pipe->ar_usb->ar; + struct sk_buff *skb; + + if (urb->status != 0) { + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "pipe: %d, failed:%d\n", + pipe->logical_pipe_num, urb->status); + } + + skb = urb_context->skb; + urb_context->skb = NULL; + ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, skb); + schedule_work(&pipe->io_complete_work); +} + +/* pipe operations */ +static void ath10k_usb_post_recv_transfers(struct ath10k *ar, + struct ath10k_usb_pipe *recv_pipe) +{ + struct ath10k_urb_context *urb_context; + struct urb *urb; + int usb_status; + + for (;;) { + urb_context = ath10k_usb_alloc_urb_from_pipe(recv_pipe); + if (!urb_context) + break; + + urb_context->skb = dev_alloc_skb(ATH10K_USB_RX_BUFFER_SIZE); + if (!urb_context->skb) + goto err; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + goto err; + + usb_fill_bulk_urb(urb, + recv_pipe->ar_usb->udev, + recv_pipe->usb_pipe_handle, + urb_context->skb->data, + ATH10K_USB_RX_BUFFER_SIZE, + ath10k_usb_recv_complete, urb_context); + + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%p\n", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, recv_pipe->ep_address, + ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb); + + usb_anchor_urb(urb, &recv_pipe->urb_submitted); + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb bulk recv failed: %d\n", + usb_status); + usb_unanchor_urb(urb); + usb_free_urb(urb); + goto err; + } + usb_free_urb(urb); + } + + return; + +err: + ath10k_usb_cleanup_recv_urb(urb_context); +} + +static void ath10k_usb_flush_all(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + int i; + + for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { + if (ar_usb->pipes[i].ar_usb) { + usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted); + cancel_work_sync(&ar_usb->pipes[i].io_complete_work); + } + } +} + +static void ath10k_usb_start_recv_pipes(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA].urb_cnt_thresh = 1; + + ath10k_usb_post_recv_transfers(ar, + &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); +} + +static void ath10k_usb_tx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_htc_hdr *htc_hdr; + struct ath10k_htc_ep *ep; + + htc_hdr = (struct ath10k_htc_hdr *)skb->data; + ep = &ar->htc.endpoint[htc_hdr->eid]; + ath10k_htc_notify_tx_completion(ep, skb); + /* The TX 
complete handler now owns the skb... */ +} + +static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_htc *htc = &ar->htc; + struct ath10k_htc_hdr *htc_hdr; + enum ath10k_htc_ep_id eid; + struct ath10k_htc_ep *ep; + u16 payload_len; + u8 *trailer; + int ret; + + htc_hdr = (struct ath10k_htc_hdr *)skb->data; + eid = eid_from_htc_hdr(htc_hdr); + ep = &ar->htc.endpoint[eid]; + + if (ep->service_id == 0) { + ath10k_warn(ar, "ep %d is not connected\n", eid); + goto out_free_skb; + } + + payload_len = le16_to_cpu(htc_hdr->len); + if (!payload_len) { + ath10k_warn(ar, "zero length frame received, firmware crashed?\n"); + goto out_free_skb; + } + + if (payload_len < htc_hdr->trailer_len) { + ath10k_warn(ar, "malformed frame received, firmware crashed?\n"); + goto out_free_skb; + } + + if (htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT) { + trailer = skb->data + sizeof(*htc_hdr) + payload_len - + htc_hdr->trailer_len; + + ret = ath10k_htc_process_trailer(htc, + trailer, + htc_hdr->trailer_len, + eid, + NULL, + NULL); + if (ret) + goto out_free_skb; + + if (is_trailer_only_msg(htc_hdr)) + goto out_free_skb; + + /* strip off the trailer from the skb since it should not + * be passed on to upper layers + */ + skb_trim(skb, skb->len - htc_hdr->trailer_len); + } + + skb_pull(skb, sizeof(*htc_hdr)); + ep->ep_ops.ep_rx_complete(ar, skb); + /* The RX complete handler now owns the skb... */ + + if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) { + local_bh_disable(); + napi_schedule(&ar->napi); + local_bh_enable(); + } + + return; + +out_free_skb: + dev_kfree_skb(skb); +} + +static void ath10k_usb_io_comp_work(struct work_struct *work) +{ + struct ath10k_usb_pipe *pipe = container_of(work, + struct ath10k_usb_pipe, + io_complete_work); + struct ath10k *ar = pipe->ar_usb->ar; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&pipe->io_comp_queue))) { + if (pipe->flags & ATH10K_USB_PIPE_FLAG_TX) + ath10k_usb_tx_complete(ar, skb); + else + ath10k_usb_rx_complete(ar, skb); + } +} + +#define ATH10K_USB_MAX_DIAG_CMD (sizeof(struct ath10k_usb_ctrl_diag_cmd_write)) +#define ATH10K_USB_MAX_DIAG_RESP (sizeof(struct ath10k_usb_ctrl_diag_resp_read)) + +static void ath10k_usb_destroy(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + ath10k_usb_flush_all(ar); + ath10k_usb_cleanup_pipe_resources(ar); + usb_set_intfdata(ar_usb->interface, NULL); + + kfree(ar_usb->diag_cmd_buffer); + kfree(ar_usb->diag_resp_buffer); +} + +static int ath10k_usb_hif_start(struct ath10k *ar) +{ + int i; + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + ath10k_core_napi_enable(ar); + ath10k_usb_start_recv_pipes(ar); + + /* set the TX resource avail threshold for each TX pipe */ + for (i = ATH10K_USB_PIPE_TX_CTRL; + i <= ATH10K_USB_PIPE_TX_DATA_HP; i++) { + ar_usb->pipes[i].urb_cnt_thresh = + ar_usb->pipes[i].urb_alloc / 2; + } + + return 0; +} + +static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct ath10k_usb_pipe *pipe = &ar_usb->pipes[pipe_id]; + struct ath10k_urb_context *urb_context; + struct sk_buff *skb; + struct urb *urb; + int ret, i; + + for (i = 0; i < n_items; i++) { + urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); + if (!urb_context) { + ret = -ENOMEM; + goto err; + } + + skb = items[i].transfer_context; + urb_context->skb = skb; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + ret = -ENOMEM; + goto 
err_free_urb_to_pipe; + } + + usb_fill_bulk_urb(urb, + ar_usb->udev, + pipe->usb_pipe_handle, + skb->data, + skb->len, + ath10k_usb_transmit_complete, urb_context); + + if (!(skb->len % pipe->max_packet_size)) { + /* hit a max packet boundary on this pipe */ + urb->transfer_flags |= URB_ZERO_PACKET; + } + + usb_anchor_urb(urb, &pipe->urb_submitted); + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb bulk transmit failed: %d\n", ret); + usb_unanchor_urb(urb); + usb_free_urb(urb); + ret = -EINVAL; + goto err_free_urb_to_pipe; + } + + usb_free_urb(urb); + } + + return 0; + +err_free_urb_to_pipe: + ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); +err: + return ret; +} + +static void ath10k_usb_hif_stop(struct ath10k *ar) +{ + ath10k_usb_flush_all(ar); + ath10k_core_napi_sync_disable(ar); +} + +static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + return ar_usb->pipes[pipe_id].urb_cnt; +} + +static int ath10k_usb_submit_ctrl_out(struct ath10k *ar, + u8 req, u16 value, u16 index, void *data, + u32 size) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + u8 *buf = NULL; + int ret; + + if (size > 0) { + buf = kmemdup(data, size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + + /* note: if successful returns number of bytes transferred */ + ret = usb_control_msg(ar_usb->udev, + usb_sndctrlpipe(ar_usb->udev, 0), + req, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, buf, + size, 1000); + + if (ret < 0) { + ath10k_warn(ar, "Failed to submit usb control message: %d\n", + ret); + kfree(buf); + return ret; + } + + kfree(buf); + + return 0; +} + +static int ath10k_usb_submit_ctrl_in(struct ath10k *ar, + u8 req, u16 value, u16 index, void *data, + u32 size) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + u8 *buf = NULL; + int ret; + + if (size > 0) { + buf = kmalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + + /* note: if successful returns number of bytes transferred */ + ret = usb_control_msg(ar_usb->udev, + usb_rcvctrlpipe(ar_usb->udev, 0), + req, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, buf, + size, 2000); + + if (ret < 0) { + ath10k_warn(ar, "Failed to read usb control message: %d\n", + ret); + kfree(buf); + return ret; + } + + memcpy((u8 *)data, buf, size); + + kfree(buf); + + return 0; +} + +static int ath10k_usb_ctrl_msg_exchange(struct ath10k *ar, + u8 req_val, u8 *req_buf, u32 req_len, + u8 resp_val, u8 *resp_buf, + u32 *resp_len) +{ + int ret; + + /* send command */ + ret = ath10k_usb_submit_ctrl_out(ar, req_val, 0, 0, + req_buf, req_len); + if (ret) + goto err; + + /* get response */ + if (resp_buf) { + ret = ath10k_usb_submit_ctrl_in(ar, resp_val, 0, 0, + resp_buf, *resp_len); + if (ret) + goto err; + } + + return 0; +err: + return ret; +} + +static int ath10k_usb_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct ath10k_usb_ctrl_diag_cmd_read *cmd; + u32 resp_len; + int ret; + + if (buf_len < sizeof(struct ath10k_usb_ctrl_diag_resp_read)) + return -EINVAL; + + cmd = (struct ath10k_usb_ctrl_diag_cmd_read *)ar_usb->diag_cmd_buffer; + memset(cmd, 0, sizeof(*cmd)); + cmd->cmd = ATH10K_USB_CTRL_DIAG_CC_READ; + cmd->address = cpu_to_le32(address); + resp_len = sizeof(struct ath10k_usb_ctrl_diag_resp_read); + + ret = ath10k_usb_ctrl_msg_exchange(ar, + ATH10K_USB_CONTROL_REQ_DIAG_CMD, + (u8 *)cmd, + 
sizeof(*cmd), + ATH10K_USB_CONTROL_REQ_DIAG_RESP, + ar_usb->diag_resp_buffer, &resp_len); + if (ret) + return ret; + + if (resp_len != sizeof(struct ath10k_usb_ctrl_diag_resp_read)) + return -EMSGSIZE; + + memcpy(buf, ar_usb->diag_resp_buffer, + sizeof(struct ath10k_usb_ctrl_diag_resp_read)); + + return 0; +} + +static int ath10k_usb_hif_diag_write(struct ath10k *ar, u32 address, + const void *data, int nbytes) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct ath10k_usb_ctrl_diag_cmd_write *cmd; + int ret; + + if (nbytes != sizeof(cmd->value)) + return -EINVAL; + + cmd = (struct ath10k_usb_ctrl_diag_cmd_write *)ar_usb->diag_cmd_buffer; + memset(cmd, 0, sizeof(*cmd)); + cmd->cmd = cpu_to_le32(ATH10K_USB_CTRL_DIAG_CC_WRITE); + cmd->address = cpu_to_le32(address); + memcpy(&cmd->value, data, nbytes); + + ret = ath10k_usb_ctrl_msg_exchange(ar, + ATH10K_USB_CONTROL_REQ_DIAG_CMD, + (u8 *)cmd, + sizeof(*cmd), + 0, NULL, NULL); + if (ret) + return ret; + + return 0; +} + +static int ath10k_usb_bmi_exchange_msg(struct ath10k *ar, + void *req, u32 req_len, + void *resp, u32 *resp_len) +{ + int ret; + + if (req) { + ret = ath10k_usb_submit_ctrl_out(ar, + ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD, + 0, 0, req, req_len); + if (ret) { + ath10k_warn(ar, + "unable to send the bmi data to the device: %d\n", + ret); + return ret; + } + } + + if (resp) { + ret = ath10k_usb_submit_ctrl_in(ar, + ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP, + 0, 0, resp, *resp_len); + if (ret) { + ath10k_warn(ar, + "Unable to read the bmi data from the device: %d\n", + ret); + return ret; + } + } + + return 0; +} + +static void ath10k_usb_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; + *dl_pipe = ATH10K_USB_PIPE_RX_CTRL; +} + +static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + switch (svc_id) { + case ATH10K_HTC_SVC_ID_RSVD_CTRL: + case ATH10K_HTC_SVC_ID_WMI_CONTROL: + *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; + /* due to large control packets, shift to data pipe */ + *dl_pipe = ATH10K_USB_PIPE_RX_DATA; + break; + case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: + *ul_pipe = ATH10K_USB_PIPE_TX_DATA_LP; + /* Disable rxdata2 directly, it will be enabled + * if FW enable rxdata2 + */ + *dl_pipe = ATH10K_USB_PIPE_RX_DATA; + break; + default: + return -EPERM; + } + + return 0; +} + +static int ath10k_usb_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) +{ + return 0; +} + +static void ath10k_usb_hif_power_down(struct ath10k *ar) +{ + ath10k_usb_flush_all(ar); +} + +#ifdef CONFIG_PM + +static int ath10k_usb_hif_suspend(struct ath10k *ar) +{ + return -EOPNOTSUPP; +} + +static int ath10k_usb_hif_resume(struct ath10k *ar) +{ + return -EOPNOTSUPP; +} +#endif + +static const struct ath10k_hif_ops ath10k_usb_hif_ops = { + .tx_sg = ath10k_usb_hif_tx_sg, + .diag_read = ath10k_usb_hif_diag_read, + .diag_write = ath10k_usb_hif_diag_write, + .exchange_bmi_msg = ath10k_usb_bmi_exchange_msg, + .start = ath10k_usb_hif_start, + .stop = ath10k_usb_hif_stop, + .map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe, + .get_default_pipe = ath10k_usb_hif_get_default_pipe, + .get_free_queue_number = ath10k_usb_hif_get_free_queue_number, + .power_up = ath10k_usb_hif_power_up, + .power_down = ath10k_usb_hif_power_down, +#ifdef CONFIG_PM + .suspend = ath10k_usb_hif_suspend, + .resume = ath10k_usb_hif_resume, +#endif +}; + +static u8 ath10k_usb_get_logical_pipe_num(u8 ep_address, int *urb_count) +{ + u8 pipe_num = 
ATH10K_USB_PIPE_INVALID; + + switch (ep_address) { + case ATH10K_USB_EP_ADDR_APP_CTRL_IN: + pipe_num = ATH10K_USB_PIPE_RX_CTRL; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_IN: + pipe_num = ATH10K_USB_PIPE_RX_DATA; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_INT_IN: + pipe_num = ATH10K_USB_PIPE_RX_INT; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA2_IN: + pipe_num = ATH10K_USB_PIPE_RX_DATA2; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_CTRL_OUT: + pipe_num = ATH10K_USB_PIPE_TX_CTRL; + *urb_count = TX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT: + pipe_num = ATH10K_USB_PIPE_TX_DATA_LP; + *urb_count = TX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT: + pipe_num = ATH10K_USB_PIPE_TX_DATA_MP; + *urb_count = TX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT: + pipe_num = ATH10K_USB_PIPE_TX_DATA_HP; + *urb_count = TX_URB_COUNT; + break; + default: + /* note: there may be endpoints not currently used */ + break; + } + + return pipe_num; +} + +static int ath10k_usb_alloc_pipe_resources(struct ath10k *ar, + struct ath10k_usb_pipe *pipe, + int urb_cnt) +{ + struct ath10k_urb_context *urb_context; + int i; + + INIT_LIST_HEAD(&pipe->urb_list_head); + init_usb_anchor(&pipe->urb_submitted); + + for (i = 0; i < urb_cnt; i++) { + urb_context = kzalloc(sizeof(*urb_context), GFP_KERNEL); + if (!urb_context) + return -ENOMEM; + + urb_context->pipe = pipe; + + /* we are only allocate the urb contexts here, the actual URB + * is allocated from the kernel as needed to do a transaction + */ + pipe->urb_alloc++; + ath10k_usb_free_urb_to_pipe(pipe, urb_context); + } + + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb alloc resources lpipe %d hpipe 0x%x urbs %d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc); + + return 0; +} + +static int ath10k_usb_setup_pipe_resources(struct ath10k *ar, + struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct usb_host_interface *iface_desc = interface->cur_altsetting; + struct usb_endpoint_descriptor *endpoint; + struct ath10k_usb_pipe *pipe; + int ret, i, urbcount; + u8 pipe_num; + + ath10k_dbg(ar, ATH10K_DBG_USB, "usb setting up pipes using interface\n"); + + /* walk descriptors and setup pipes */ + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb %s bulk ep 0x%2.2x maxpktsz %d\n", + ATH10K_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "rx" : "tx", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize)); + } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb %s int ep 0x%2.2x maxpktsz %d interval %d\n", + ATH10K_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "rx" : "tx", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb %s isoc ep 0x%2.2x maxpktsz %d interval %d\n", + ATH10K_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "rx" : "tx", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } + + /* Ignore broken descriptors. 
*/ + if (usb_endpoint_maxp(endpoint) == 0) + continue; + + urbcount = 0; + + pipe_num = + ath10k_usb_get_logical_pipe_num(endpoint->bEndpointAddress, + &urbcount); + if (pipe_num == ATH10K_USB_PIPE_INVALID) + continue; + + pipe = &ar_usb->pipes[pipe_num]; + if (pipe->ar_usb) + /* hmmm..pipe was already setup */ + continue; + + pipe->ar_usb = ar_usb; + pipe->logical_pipe_num = pipe_num; + pipe->ep_address = endpoint->bEndpointAddress; + pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize); + + if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { + if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvbulkpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndbulkpipe(ar_usb->udev, + pipe->ep_address); + } + } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { + if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvintpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndintpipe(ar_usb->udev, + pipe->ep_address); + } + } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvisocpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndisocpipe(ar_usb->udev, + pipe->ep_address); + } + } + + pipe->ep_desc = endpoint; + + if (!ATH10K_USB_IS_DIR_IN(pipe->ep_address)) + pipe->flags |= ATH10K_USB_PIPE_FLAG_TX; + + ret = ath10k_usb_alloc_pipe_resources(ar, pipe, urbcount); + if (ret) + return ret; + } + + return 0; +} + +static int ath10k_usb_create(struct ath10k *ar, + struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct usb_device *dev = interface_to_usbdev(interface); + struct ath10k_usb_pipe *pipe; + int ret, i; + + usb_set_intfdata(interface, ar_usb); + spin_lock_init(&ar_usb->cs_lock); + ar_usb->udev = dev; + ar_usb->interface = interface; + + for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { + pipe = &ar_usb->pipes[i]; + INIT_WORK(&pipe->io_complete_work, + ath10k_usb_io_comp_work); + skb_queue_head_init(&pipe->io_comp_queue); + } + + ar_usb->diag_cmd_buffer = kzalloc(ATH10K_USB_MAX_DIAG_CMD, GFP_KERNEL); + if (!ar_usb->diag_cmd_buffer) { + ret = -ENOMEM; + goto err; + } + + ar_usb->diag_resp_buffer = kzalloc(ATH10K_USB_MAX_DIAG_RESP, + GFP_KERNEL); + if (!ar_usb->diag_resp_buffer) { + ret = -ENOMEM; + goto err; + } + + ret = ath10k_usb_setup_pipe_resources(ar, interface); + if (ret) + goto err; + + return 0; + +err: + ath10k_usb_destroy(ar); + return ret; +} + +static int ath10k_usb_napi_poll(struct napi_struct *ctx, int budget) +{ + struct ath10k *ar = container_of(ctx, struct ath10k, napi); + int done; + + done = ath10k_htt_rx_hl_indication(ar, budget); + ath10k_dbg(ar, ATH10K_DBG_USB, "napi poll: done: %d, budget:%d\n", done, budget); + + if (done < budget) + napi_complete_done(ctx, done); + + return done; +} + +/* ath10k usb driver registered functions */ +static int ath10k_usb_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct ath10k *ar; + struct ath10k_usb *ar_usb; + struct usb_device *dev = interface_to_usbdev(interface); + int ret, vendor_id, product_id; + enum ath10k_hw_rev hw_rev; + struct ath10k_bus_params bus_params = {}; + + /* Assumption: All USB based chipsets (so far) are QCA9377 based. 
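ath10k_usb_setup_pipe_resources() above walks the interface's endpoint descriptors, skips broken ones (zero max packet size), and binds each usable endpoint to a logical pipe. The same classification can be written with the generic usb_endpoint_* helpers from <linux/usb.h>; a compact sketch, not the driver's code:

#include <linux/printk.h>
#include <linux/usb.h>

static void toy_walk_endpoints(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *ep = &alt->endpoint[i].desc;

		if (usb_endpoint_maxp(ep) == 0)
			continue;	/* ignore broken descriptors */

		if (usb_endpoint_is_bulk_in(ep) || usb_endpoint_is_bulk_out(ep))
			pr_debug("bulk ep 0x%02x\n", ep->bEndpointAddress);
		else if (usb_endpoint_is_int_in(ep) || usb_endpoint_is_int_out(ep))
			pr_debug("int ep 0x%02x\n", ep->bEndpointAddress);
		/* pipe handle then comes from usb_{rcv,snd}{bulk,int}pipe() */
	}
}
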
+ * If there will be newer chipsets that does not use the hw reg + * setup as defined in qca6174_regs and qca6174_values, this + * assumption is no longer valid and hw_rev must be setup differently + * depending on chipset. + */ + hw_rev = ATH10K_HW_QCA9377; + + ar = ath10k_core_create(sizeof(*ar_usb), &dev->dev, ATH10K_BUS_USB, + hw_rev, &ath10k_usb_hif_ops); + if (!ar) { + dev_err(&dev->dev, "failed to allocate core\n"); + return -ENOMEM; + } + + netif_napi_add(ar->napi_dev, &ar->napi, ath10k_usb_napi_poll); + + usb_get_dev(dev); + vendor_id = le16_to_cpu(dev->descriptor.idVendor); + product_id = le16_to_cpu(dev->descriptor.idProduct); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "usb new func vendor 0x%04x product 0x%04x\n", + vendor_id, product_id); + + ar_usb = ath10k_usb_priv(ar); + ret = ath10k_usb_create(ar, interface); + if (ret) + goto err; + ar_usb->ar = ar; + + ar->dev_id = product_id; + ar->id.vendor = vendor_id; + ar->id.device = product_id; + + bus_params.dev_type = ATH10K_DEV_TYPE_HL; + /* TODO: don't know yet how to get chip_id with USB */ + bus_params.chip_id = 0; + bus_params.hl_msdu_ids = true; + ret = ath10k_core_register(ar, &bus_params); + if (ret) { + ath10k_warn(ar, "failed to register driver core: %d\n", ret); + goto err_usb_destroy; + } + + /* TODO: remove this once USB support is fully implemented */ + ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n"); + + return 0; + +err_usb_destroy: + ath10k_usb_destroy(ar); + +err: + ath10k_core_destroy(ar); + + usb_put_dev(dev); + + return ret; +} + +static void ath10k_usb_remove(struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb; + + ar_usb = usb_get_intfdata(interface); + if (!ar_usb) + return; + + ath10k_core_unregister(ar_usb->ar); + netif_napi_del(&ar_usb->ar->napi); + ath10k_usb_destroy(ar_usb->ar); + usb_put_dev(interface_to_usbdev(interface)); + ath10k_core_destroy(ar_usb->ar); +} + +#ifdef CONFIG_PM + +static int ath10k_usb_pm_suspend(struct usb_interface *interface, + pm_message_t message) +{ + struct ath10k_usb *ar_usb = usb_get_intfdata(interface); + + ath10k_usb_flush_all(ar_usb->ar); + return 0; +} + +static int ath10k_usb_pm_resume(struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb = usb_get_intfdata(interface); + struct ath10k *ar = ar_usb->ar; + + ath10k_usb_post_recv_transfers(ar, + &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); + + return 0; +} + +#else + +#define ath10k_usb_pm_suspend NULL +#define ath10k_usb_pm_resume NULL + +#endif + +/* table of devices that work with this driver */ +static struct usb_device_id ath10k_usb_ids[] = { + {USB_DEVICE(0x13b1, 0x0042)}, /* Linksys WUSB6100M */ + { /* Terminating entry */ }, +}; + +MODULE_DEVICE_TABLE(usb, ath10k_usb_ids); + +static struct usb_driver ath10k_usb_driver = { + .name = "ath10k_usb", + .probe = ath10k_usb_probe, + .suspend = ath10k_usb_pm_suspend, + .resume = ath10k_usb_pm_resume, + .disconnect = ath10k_usb_remove, + .id_table = ath10k_usb_ids, + .supports_autosuspend = true, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(ath10k_usb_driver); + +MODULE_AUTHOR("Atheros Communications, Inc."); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros USB 802.11ac WLAN devices"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h new file mode 100644 index 000000000000..7e4cfbb673c9 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/usb.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright 
(c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com> + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _USB_H_ +#define _USB_H_ + +/* constants */ +#define TX_URB_COUNT 32 +#define RX_URB_COUNT 32 +#define ATH10K_USB_RX_BUFFER_SIZE 4096 + +#define ATH10K_USB_PIPE_INVALID ATH10K_USB_PIPE_MAX + +/* USB endpoint definitions */ +#define ATH10K_USB_EP_ADDR_APP_CTRL_IN 0x81 +#define ATH10K_USB_EP_ADDR_APP_DATA_IN 0x82 +#define ATH10K_USB_EP_ADDR_APP_DATA2_IN 0x83 +#define ATH10K_USB_EP_ADDR_APP_INT_IN 0x84 + +#define ATH10K_USB_EP_ADDR_APP_CTRL_OUT 0x01 +#define ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT 0x02 +#define ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT 0x03 +#define ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT 0x04 + +/* diagnostic command definitions */ +#define ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD 1 +#define ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP 2 +#define ATH10K_USB_CONTROL_REQ_DIAG_CMD 3 +#define ATH10K_USB_CONTROL_REQ_DIAG_RESP 4 + +#define ATH10K_USB_CTRL_DIAG_CC_READ 0 +#define ATH10K_USB_CTRL_DIAG_CC_WRITE 1 + +#define ATH10K_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02) +#define ATH10K_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03) +#define ATH10K_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01) +#define ATH10K_USB_IS_DIR_IN(addr) ((addr) & 0x80) + +struct ath10k_usb_ctrl_diag_cmd_write { + __le32 cmd; + __le32 address; + __le32 value; + __le32 padding; +} __packed; + +struct ath10k_usb_ctrl_diag_cmd_read { + __le32 cmd; + __le32 address; +} __packed; + +struct ath10k_usb_ctrl_diag_resp_read { + u8 value[4]; +} __packed; + +/* tx/rx pipes for usb */ +enum ath10k_usb_pipe_id { + ATH10K_USB_PIPE_TX_CTRL = 0, + ATH10K_USB_PIPE_TX_DATA_LP, + ATH10K_USB_PIPE_TX_DATA_MP, + ATH10K_USB_PIPE_TX_DATA_HP, + ATH10K_USB_PIPE_RX_CTRL, + ATH10K_USB_PIPE_RX_DATA, + ATH10K_USB_PIPE_RX_DATA2, + ATH10K_USB_PIPE_RX_INT, + ATH10K_USB_PIPE_MAX +}; + +struct ath10k_usb_pipe { + struct list_head urb_list_head; + struct usb_anchor urb_submitted; + u32 urb_alloc; + u32 urb_cnt; + u32 urb_cnt_thresh; + unsigned int usb_pipe_handle; + u32 flags; + u8 ep_address; + u8 logical_pipe_num; + struct ath10k_usb *ar_usb; + u16 max_packet_size; + struct work_struct io_complete_work; + struct sk_buff_head io_comp_queue; + struct usb_endpoint_descriptor *ep_desc; +}; + +#define ATH10K_USB_PIPE_FLAG_TX BIT(0) + +/* usb device object */ +struct ath10k_usb { + /* protects pipe->urb_list_head and pipe->urb_cnt */ + spinlock_t cs_lock; + + struct usb_device *udev; + struct usb_interface *interface; + struct ath10k_usb_pipe pipes[ATH10K_USB_PIPE_MAX]; + u8 *diag_cmd_buffer; + u8 *diag_resp_buffer; + struct ath10k *ar; +}; + +/* usb urb object */ +struct ath10k_urb_context { + struct list_head link; + struct ath10k_usb_pipe *pipe; + struct sk_buff *skb; + struct ath10k *ar; +}; + +static inline struct ath10k_usb *ath10k_usb_priv(struct ath10k *ar) +{ + return (struct ath10k_usb *)ar->drv_priv; +} + +#endif diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h new file mode 100644 index 000000000000..f3f6b5954b27 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -0,0 +1,1710 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _WMI_OPS_H_ +#define _WMI_OPS_H_ + +struct ath10k; +struct sk_buff; + +struct wmi_ops { + void (*rx)(struct ath10k *ar, struct sk_buff *skb); + void (*map_svc)(const __le32 *in, unsigned long *out, size_t len); + void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len); + + int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_scan_ev_arg *arg); + int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_mgmt_rx_ev_arg *arg); + int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_compl_ev_arg *arg); + int (*pull_mgmt_tx_bundle_compl)( + struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg); + int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_ch_info_ev_arg *arg); + int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_vdev_start_ev_arg *arg); + int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_peer_kick_ev_arg *arg); + int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_swba_ev_arg *arg); + int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_phyerr_hdr_arg *arg); + int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf, + int left_len, struct wmi_phyerr_ev_arg *arg); + int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_svc_rdy_ev_arg *arg); + int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_rdy_ev_arg *arg); + int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb, + struct ath10k_fw_stats *stats); + int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_roam_ev_arg *arg); + int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_wow_ev_arg *arg); + int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_echo_ev_arg *arg); + int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_dfs_status_ev_arg *arg); + int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_svc_avail_ev_arg *arg); + + enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar); + + struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt); + struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar); + struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar, + const u8 macaddr[ETH_ALEN]); + struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g, + u16 rd5g, u16 ctl2g, u16 ctl5g, + enum wmi_dfs_region dfs_reg); + struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id, + u32 value); + struct sk_buff *(*gen_init)(struct ath10k *ar); + struct sk_buff *(*gen_start_scan)(struct ath10k *ar, + const struct wmi_start_scan_arg *arg); + struct sk_buff *(*gen_stop_scan)(struct ath10k *ar, + const struct wmi_stop_scan_arg *arg); + struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id, + enum wmi_vdev_type type, + enum wmi_vdev_subtype subtype, + const u8 macaddr[ETH_ALEN]); + struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id); + struct sk_buff *(*gen_vdev_start)(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *arg, + bool restart); + struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id); + struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid, + const u8 *bssid); + struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id); + struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id, + u32 param_id, u32 param_value); 
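The ops table declared here abstracts over the WMI ABI variants the driver speaks: each gen_* hook only builds the command skb for its ABI, and a single ath10k_wmi_cmd_send() transmits it. Every inline wrapper later in this header follows one shape, roughly as below; "gen_foo" and "foo_cmdid" are placeholders, not real members:

/* Shape shared by the wrappers in this header: guard against an ABI that
 * lacks the hook, let gen_*() build the command buffer, then hand it to
 * the common send path. gen_foo/foo_cmdid are hypothetical names.
 */
static inline int ath10k_wmi_foo(struct ath10k *ar, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_foo)
		return -EOPNOTSUPP;	/* this firmware ABI has no such command */

	skb = ar->wmi.ops->gen_foo(ar, value);	/* only builds the buffer */
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->foo_cmdid);
}
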
+ struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar, + const struct wmi_vdev_install_key_arg *arg); + struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar, + const struct wmi_vdev_spectral_conf_arg *arg); + struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id, + u32 trigger, u32 enable); + struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id, + const struct wmi_wmm_params_all_arg *arg); + struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], + enum wmi_peer_type peer_type); + struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN]); + struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], + u32 tid_bitmap); + struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id, + const u8 *peer_addr, + enum wmi_peer_param param_id, + u32 param_value); + struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg); + struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_ps_mode psmode); + struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_powersave_param param_id, + u32 value); + struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id, + const u8 *mac, + enum wmi_ap_ps_peer_param param_id, + u32 value); + struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar, + const struct wmi_scan_chan_list_arg *arg); + struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar, + u32 prob_req_oui); + struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id, + const void *bcn, size_t bcn_len, + u32 bcn_paddr, bool dtim_zero, + bool deliver_cab); + struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar, + const struct wmi_wmm_params_all_arg *arg); + struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask); + struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar, + u32 vdev_id, + enum + wmi_peer_stats_info_request_type + type, + u8 *addr, + u32 reset); + struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar, + enum wmi_force_fw_hang_type type, + u32 delay_ms); + struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb); + struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar, + struct sk_buff *skb, + dma_addr_t paddr); + int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu); + struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable, + u32 log_level); + struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); + struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar); + struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar, + u32 period, u32 duration, + u32 next_offset, + u32 enabled); + struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar); + struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id, + const u8 *mac); + struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id, + const u8 *mac, u32 tid, u32 buf_size); + struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id, + const u8 *mac, u32 tid, + u32 status); + struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id, + const u8 *mac, u32 tid, u32 initiator, + u32 reason); + struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id, + u32 tim_ie_offset, struct sk_buff *bcn, + u32 prb_caps, u32 prb_erp, + void *prb_ies, size_t prb_ies_len); + struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id, + struct 
sk_buff *bcn); + struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id, + const u8 *p2p_ie); + struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], + const struct wmi_sta_uapsd_auto_trig_arg *args, + u32 num_ac); + struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar, + const struct wmi_sta_keepalive_arg *arg); + struct sk_buff *(*gen_wow_enable)(struct ath10k *ar); + struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable); + struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar); + struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id, + u32 pattern_id, + const u8 *pattern, + const u8 *mask, + int pattern_len, + int pattern_offset); + struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id, + u32 pattern_id); + struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar, + u32 vdev_id, + enum wmi_tdls_state state); + struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar, + const struct wmi_tdls_peer_update_cmd_arg *arg, + const struct wmi_tdls_peer_capab_arg *cap, + const struct wmi_channel_arg *chan); + struct sk_buff *(*gen_radar_found) + (struct ath10k *ar, + const struct ath10k_radar_found_info *arg); + struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable); + struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar, + u32 param); + void (*fw_stats_fill)(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf); + struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar, + u8 enable, + u32 detect_level, + u32 detect_margin); + struct sk_buff *(*ext_resource_config)(struct ath10k *ar, + enum wmi_host_platform_type type, + u32 fw_feature_bitmap); + int (*get_vdev_subtype)(struct ath10k *ar, + enum wmi_vdev_subtype subtype); + struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar, + u32 vdev_id, + struct wmi_pno_scan_req *pno_scan); + struct sk_buff *(*gen_pdev_bss_chan_info_req) + (struct ath10k *ar, + enum wmi_bss_survey_req_type type); + struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value); + struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar, + u32 param); + struct sk_buff *(*gen_bb_timing) + (struct ath10k *ar, + const struct wmi_bb_timing_cfg_arg *arg); + struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar, + const struct wmi_per_peer_per_tid_cfg_arg *arg); + struct sk_buff *(*gen_gpio_config)(struct ath10k *ar, u32 gpio_num, + u32 input, u32 pull_type, u32 intr_mode); + + struct sk_buff *(*gen_gpio_output)(struct ath10k *ar, u32 gpio_num, u32 set); +}; + +int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); + +static inline int +ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb) +{ + if (WARN_ON_ONCE(!ar->wmi.ops->rx)) + return -EOPNOTSUPP; + + ar->wmi.ops->rx(ar, skb); + return 0; +} + +static inline int +ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out, + size_t len) +{ + if (!ar->wmi.ops->map_svc) + return -EOPNOTSUPP; + + ar->wmi.ops->map_svc(in, out, len); + return 0; +} + +static inline int +ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out, + size_t len) +{ + if (!ar->wmi.ops->map_svc_ext) + return -EOPNOTSUPP; + + ar->wmi.ops->map_svc_ext(in, out, len); + return 0; +} + +static inline int +ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb, + struct wmi_scan_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_scan) + return -EOPNOTSUPP; + + return 
ar->wmi.ops->pull_scan(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_compl_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_mgmt_tx_compl) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb, + struct wmi_mgmt_rx_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_mgmt_rx) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb, + struct wmi_ch_info_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_ch_info) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_ch_info(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb, + struct wmi_vdev_start_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_vdev_start) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_vdev_start(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb, + struct wmi_peer_kick_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_peer_kick) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_peer_kick(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb, + struct wmi_swba_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_swba) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_swba(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb, + struct wmi_phyerr_hdr_arg *arg) +{ + if (!ar->wmi.ops->pull_phyerr_hdr) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf, + int left_len, struct wmi_phyerr_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_phyerr) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg); +} + +static inline int +ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb, + struct wmi_svc_rdy_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_svc_rdy) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_svc_rdy(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb, + struct wmi_rdy_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_rdy) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_rdy(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb, + struct wmi_svc_avail_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_svc_avail) + return -EOPNOTSUPP; + return ar->wmi.ops->pull_svc_avail(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + if (!ar->wmi.ops->pull_fw_stats) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_fw_stats(ar, skb, stats); +} + +static inline int +ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_roam_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_roam_ev) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_roam_ev(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb, + struct wmi_wow_ev_arg 
*arg) +{ + if (!ar->wmi.ops->pull_wow_event) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_wow_event(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_echo_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_echo_ev) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_echo_ev(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb, + struct wmi_dfs_status_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_dfs_status_ev) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg); +} + +static inline enum wmi_txbf_conf +ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) +{ + if (!ar->wmi.ops->get_txbf_conf_scheme) + return WMI_TXBF_CONF_UNSUPPORTED; + + return ar->wmi.ops->get_txbf_conf_scheme(ar); +} + +static inline int +ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu) +{ + if (!ar->wmi.ops->cleanup_mgmt_tx_send) + return -EOPNOTSUPP; + + return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu); +} + +static inline int +ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, + dma_addr_t paddr) +{ + struct sk_buff *skb; + int ret; + + if (!ar->wmi.ops->gen_mgmt_tx_send) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + ret = ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->mgmt_tx_send_cmdid); + if (ret) + return ret; + + return 0; +} + +static inline int +ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); + struct sk_buff *skb; + int ret; + + if (!ar->wmi.ops->gen_mgmt_tx) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + ret = ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->mgmt_tx_cmdid); + if (ret) + return ret; + + /* FIXME There's no ACK event for Management Tx. This probably + * shouldn't be called here either. 
+ */ + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status_irqsafe(ar->hw, msdu); + + return 0; +} + +static inline int +ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g, + u16 ctl2g, u16 ctl5g, + enum wmi_dfs_region dfs_reg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_set_rd) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g, + dfs_reg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_set_regdomain_cmdid); +} + +static inline int +ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN]) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_set_base_macaddr) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_set_base_macaddr_cmdid); +} + +static inline int +ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_suspend) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid); +} + +static inline int +ath10k_wmi_pdev_resume_target(struct ath10k *ar) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_resume) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_resume(ar); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid); +} + +static inline int +ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_set_param) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); +} + +static inline int +ath10k_wmi_cmd_init(struct ath10k *ar) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_init) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_init(ar); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid); +} + +static inline int +ath10k_wmi_start_scan(struct ath10k *ar, + const struct wmi_start_scan_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_start_scan) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_start_scan(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); +} + +static inline int +ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_stop_scan) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_stop_scan(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); +} + +static inline int +ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, + enum wmi_vdev_type type, + enum wmi_vdev_subtype subtype, + const u8 macaddr[ETH_ALEN]) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_vdev_create) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid); +} + +static inline int +ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) +{ + struct sk_buff *skb; + + if 
(!ar->wmi.ops->gen_vdev_delete) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); +} + +static inline int +ath10k_wmi_vdev_start(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_vdev_start) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_start(ar, arg, false); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->vdev_start_request_cmdid); +} + +static inline int +ath10k_wmi_vdev_restart(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_vdev_start) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_start(ar, arg, true); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->vdev_restart_request_cmdid); +} + +static inline int +ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_vdev_stop) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); +} + +static inline int +ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_vdev_up) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid); +} + +static inline int +ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_vdev_down) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); +} + +static inline int +ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id, + u32 param_value) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_vdev_set_param) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id, + param_value); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid); +} + +static inline int +ath10k_wmi_vdev_install_key(struct ath10k *ar, + const struct wmi_vdev_install_key_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_vdev_install_key) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_install_key(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->vdev_install_key_cmdid); +} + +static inline int +ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, + const struct wmi_vdev_spectral_conf_arg *arg) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_vdev_spectral_conf) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, + u32 enable) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_vdev_spectral_enable) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger, + enable); + if (IS_ERR(skb)) + 
return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], + const struct wmi_sta_uapsd_auto_trig_arg *args, + u32 num_ac) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_vdev_sta_uapsd) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args, + num_ac); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, + const struct wmi_wmm_params_all_arg *arg) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_vdev_wmm_conf) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], + enum wmi_peer_type peer_type) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_peer_create) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); +} + +static inline int +ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN]) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_peer_delete) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); +} + +static inline int +ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_peer_flush) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); +} + +static inline int +ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr, + enum wmi_peer_param param_id, u32 param_value) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_peer_set_param) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id, + param_value); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid); +} + +static inline int +ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_ps_mode psmode) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_set_psmode) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->sta_powersave_mode_cmdid); +} + +static inline int +ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_powersave_param param_id, u32 value) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_set_sta_ps) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->sta_powersave_param_cmdid); +} + +static inline int +ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 
*mac, + enum wmi_ap_ps_peer_param param_id, u32 value) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_set_ap_ps) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->ap_ps_peer_param_cmdid); +} + +static inline int +ath10k_wmi_scan_chan_list(struct ath10k *ar, + const struct wmi_scan_chan_list_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_scan_chan_list) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_scan_chan_list(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); +} + +static inline int +ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN]) +{ + struct sk_buff *skb; + u32 prob_req_oui; + + prob_req_oui = (((u32)mac_addr[0]) << 16) | + (((u32)mac_addr[1]) << 8) | mac_addr[2]; + + if (!ar->wmi.ops->gen_scan_prob_req_oui) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->scan_prob_req_oui_cmdid); +} + +static inline int +ath10k_wmi_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_peer_assoc) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_peer_assoc(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); +} + +static inline int +ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id, + const void *bcn, size_t bcn_len, + u32 bcn_paddr, bool dtim_zero, + bool deliver_cab) +{ + struct sk_buff *skb; + int ret; + + if (!ar->wmi.ops->gen_beacon_dma) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr, + dtim_zero, deliver_cab); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + ret = ath10k_wmi_cmd_send_nowait(ar, skb, + ar->wmi.cmd->pdev_send_bcn_cmdid); + if (ret) { + dev_kfree_skb(skb); + return ret; + } + + return 0; +} + +static inline int +ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, + const struct wmi_wmm_params_all_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_set_wmm) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_set_wmm_params_cmdid); +} + +static inline int +ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_request_stats) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_request_stats(ar, stats_mask); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); +} + +static inline int +ath10k_wmi_request_peer_stats_info(struct ath10k *ar, + u32 vdev_id, + enum wmi_peer_stats_info_request_type type, + u8 *addr, + u32 reset) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_request_peer_stats_info) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_request_peer_stats_info(ar, + vdev_id, + type, + addr, + reset); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid); +} + +static inline int +ath10k_wmi_force_fw_hang(struct ath10k *ar, + enum wmi_force_fw_hang_type type, u32 delay_ms) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_force_fw_hang) + return 
-EOPNOTSUPP; + + skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); +} + +static inline int ath10k_wmi_gpio_config(struct ath10k *ar, u32 gpio_num, + u32 input, u32 pull_type, u32 intr_mode) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_gpio_config) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_gpio_config(ar, gpio_num, input, pull_type, intr_mode); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_config_cmdid); +} + +static inline int ath10k_wmi_gpio_output(struct ath10k *ar, u32 gpio_num, u32 set) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_gpio_output) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_gpio_output(ar, gpio_num, set); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_output_cmdid); +} + +static inline int +ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_dbglog_cfg) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid); +} + +static inline int +ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pktlog_enable) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pktlog_enable(ar, filter); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid); +} + +static inline int +ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pktlog_disable) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pktlog_disable(ar); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_pktlog_disable_cmdid); +} + +static inline int +ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration, + u32 next_offset, u32 enabled) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_set_quiet_mode) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration, + next_offset, enabled); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_set_quiet_mode_cmdid); +} + +static inline int +ath10k_wmi_pdev_get_temperature(struct ath10k *ar) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_get_temperature) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_get_temperature(ar); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_get_temperature_cmdid); +} + +static inline int +ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_addba_clear_resp) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->addba_clear_resp_cmdid); +} + +static inline int +ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 buf_size) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_addba_send) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + 
ar->wmi.cmd->addba_send_cmdid); +} + +static inline int +ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 status) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_addba_set_resp) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->addba_set_resp_cmdid); +} + +static inline int +ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 initiator, u32 reason) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_delba_send) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator, + reason); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->delba_send_cmdid); +} + +static inline int +ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset, + struct sk_buff *bcn, u32 prb_caps, u32 prb_erp, + void *prb_ies, size_t prb_ies_len) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_bcn_tmpl) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn, + prb_caps, prb_erp, prb_ies, + prb_ies_len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid); +} + +static inline int +ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_prb_tmpl) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid); +} + +static inline int +ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_p2p_go_bcn_ie) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie); +} + +static inline int +ath10k_wmi_sta_keepalive(struct ath10k *ar, + const struct wmi_sta_keepalive_arg *arg) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_sta_keepalive) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_sta_keepalive(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->sta_keepalive_cmd; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_wow_enable(struct ath10k *ar) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_wow_enable) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_wow_enable(ar); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->wow_enable_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_wow_add_wakeup_event) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_wow_host_wakeup_ind) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar); + if (IS_ERR(skb)) + return 
PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id, + const u8 *pattern, const u8 *mask, + int pattern_len, int pattern_offset) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_wow_add_pattern) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id, + pattern, mask, pattern_len, + pattern_offset); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_wow_del_pattern) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id, + struct wmi_pno_scan_req *pno_scan) +{ + struct sk_buff *skb; + u32 cmd_id; + + if (!ar->wmi.ops->gen_wow_config_pno) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +static inline int +ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id, + enum wmi_tdls_state state) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_update_fw_tdls_state) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid); +} + +static inline int +ath10k_wmi_tdls_peer_update(struct ath10k *ar, + const struct wmi_tdls_peer_update_cmd_arg *arg, + const struct wmi_tdls_peer_capab_arg *cap, + const struct wmi_channel_arg *chan) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_tdls_peer_update) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->tdls_peer_update_cmdid); +} + +static inline int +ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_adaptive_qcs) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid); +} + +static inline int +ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_get_tpc_config) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_get_tpc_config_cmdid); +} + +static inline int +ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats, + char *buf) +{ + if (!ar->wmi.ops->fw_stats_fill) + return -EOPNOTSUPP; + + ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf); + return 0; +} + +static inline int +ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable, + u32 detect_level, u32 detect_margin) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca) 
+ return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable, + detect_level, + detect_margin); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid); +} + +static inline int +ath10k_wmi_ext_resource_config(struct ath10k *ar, + enum wmi_host_platform_type type, + u32 fw_feature_bitmap) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->ext_resource_config) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->ext_resource_config(ar, type, + fw_feature_bitmap); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->ext_resource_cfg_cmdid); +} + +static inline int +ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype) +{ + if (!ar->wmi.ops->get_vdev_subtype) + return -EOPNOTSUPP; + + return ar->wmi.ops->get_vdev_subtype(ar, subtype); +} + +static inline int +ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar, + enum wmi_bss_survey_req_type type) +{ + struct ath10k_wmi *wmi = &ar->wmi; + struct sk_buff *skb; + + if (!wmi->ops->gen_pdev_bss_chan_info_req) + return -EOPNOTSUPP; + + skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + wmi->cmd->pdev_bss_chan_info_request_cmdid); +} + +static inline int +ath10k_wmi_echo(struct ath10k *ar, u32 value) +{ + struct ath10k_wmi *wmi = &ar->wmi; + struct sk_buff *skb; + + if (!wmi->ops->gen_echo) + return -EOPNOTSUPP; + + skb = wmi->ops->gen_echo(ar, value); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid); +} + +static inline int +ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_get_tpc_table_cmdid); +} + +static inline int +ath10k_wmi_report_radar_found(struct ath10k *ar, + const struct ath10k_radar_found_info *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_radar_found) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_radar_found(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->radar_found_cmdid); +} + +static inline int +ath10k_wmi_pdev_bb_timing(struct ath10k *ar, + const struct wmi_bb_timing_cfg_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_bb_timing) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_bb_timing(ar, arg); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->set_bb_timing_cmdid); +} + +static inline int +ath10k_wmi_set_per_peer_per_tid_cfg(struct ath10k *ar, + const struct wmi_per_peer_per_tid_cfg_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_per_peer_per_tid_cfg) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_per_peer_per_tid_cfg(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->per_peer_per_tid_config_cmdid); +} +#endif diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c new file mode 100644 index 000000000000..16d07d619b4d --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -0,0 +1,4649 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. 
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include "core.h" +#include "debug.h" +#include "mac.h" +#include "hw.h" +#include "wmi.h" +#include "wmi-ops.h" +#include "wmi-tlv.h" +#include "p2p.h" +#include "testmode.h" +#include <linux/bitfield.h> + +/***************/ +/* TLV helpers */ +/**************/ + +struct wmi_tlv_policy { + size_t min_len; +}; + +static const struct wmi_tlv_policy wmi_tlv_policies[] = { + [WMI_TLV_TAG_ARRAY_BYTE] + = { .min_len = 0 }, + [WMI_TLV_TAG_ARRAY_UINT32] + = { .min_len = 0 }, + [WMI_TLV_TAG_STRUCT_SCAN_EVENT] + = { .min_len = sizeof(struct wmi_scan_event) }, + [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR] + = { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) }, + [WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT] + = { .min_len = sizeof(struct wmi_chan_info_event) }, + [WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT] + = { .min_len = sizeof(struct wmi_vdev_start_response_event) }, + [WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT] + = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) }, + [WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT] + = { .min_len = sizeof(struct wmi_host_swba_event) }, + [WMI_TLV_TAG_STRUCT_TIM_INFO] + = { .min_len = sizeof(struct wmi_tim_info) }, + [WMI_TLV_TAG_STRUCT_P2P_NOA_INFO] + = { .min_len = sizeof(struct wmi_p2p_noa_info) }, + [WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT] + = { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) }, + [WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES] + = { .min_len = sizeof(struct hal_reg_capabilities) }, + [WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ] + = { .min_len = sizeof(struct wlan_host_mem_req) }, + [WMI_TLV_TAG_STRUCT_READY_EVENT] + = { .min_len = sizeof(struct wmi_tlv_rdy_ev) }, + [WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT] + = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) }, + [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT] + = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) }, + [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT] + = { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) }, + [WMI_TLV_TAG_STRUCT_ROAM_EVENT] + = { .min_len = sizeof(struct wmi_tlv_roam_ev) }, + [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO] + = { .min_len = sizeof(struct wmi_tlv_wow_event_info) }, + [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT] + = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) }, +}; + +static int +ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len, + int (*iter)(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data), + void *data) +{ + const void *begin = ptr; + const struct wmi_tlv *tlv; + u16 tlv_tag, tlv_len; + int ret; + + while (len > 0) { + if (len < sizeof(*tlv)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", + ptr - begin, len, sizeof(*tlv)); + return -EINVAL; + } + + tlv = ptr; + tlv_tag = __le16_to_cpu(tlv->tag); + tlv_len = __le16_to_cpu(tlv->len); + ptr += sizeof(*tlv); + len -= sizeof(*tlv); + + if (tlv_len > len) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", + tlv_tag, ptr - begin, len, tlv_len); + return -EINVAL; + } + + if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) && + wmi_tlv_policies[tlv_tag].min_len && + wmi_tlv_policies[tlv_tag].min_len > tlv_len) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n", + tlv_tag, ptr - begin, tlv_len, + wmi_tlv_policies[tlv_tag].min_len); + return 
-EINVAL; + } + + ret = iter(ar, tlv_tag, tlv_len, ptr, data); + if (ret) + return ret; + + ptr += tlv_len; + len -= tlv_len; + } + + return 0; +} + +static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + const void **tb = data; + + if (tag < WMI_TLV_TAG_MAX) + tb[tag] = ptr; + + return 0; +} + +static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb, + const void *ptr, size_t len) +{ + return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse, + (void *)tb); +} + +static const void ** +ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr, + size_t len, gfp_t gfp) +{ + const void **tb; + int ret; + + tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp); + if (!tb) + return ERR_PTR(-ENOMEM); + + ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len); + if (ret) { + kfree(tb); + return ERR_PTR(ret); + } + + return tb; +} + +static u16 ath10k_wmi_tlv_len(const void *ptr) +{ + return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len); +} + +/**************/ +/* TLV events */ +/**************/ +static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_tlv_bcn_tx_status_ev *ev; + struct ath10k_vif *arvif; + u32 vdev_id, tx_status; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + tx_status = __le32_to_cpu(ev->tx_status); + vdev_id = __le32_to_cpu(ev->vdev_id); + + switch (tx_status) { + case WMI_TLV_BCN_TX_STATUS_OK: + break; + case WMI_TLV_BCN_TX_STATUS_XRETRY: + case WMI_TLV_BCN_TX_STATUS_DROP: + case WMI_TLV_BCN_TX_STATUS_FILTERED: + /* FIXME: It's probably worth telling mac80211 to stop the + * interface as it is crippled. 
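+ * For now the failure is only logged; the CSA work below is still + * scheduled if a channel switch is pending.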
+ */ + ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d", + vdev_id, tx_status); + break; + } + + arvif = ath10k_get_arvif(ar, vdev_id); + if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active) + ieee80211_queue_work(ar->hw, &arvif->ap_csa_work); + + kfree(tb); + return 0; +} + +static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n"); + complete(&ar->vdev_delete_done); +} + +static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + const struct wmi_tlv_peer_stats_info *stat = ptr; + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + + if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO) + return -EPROTO; + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n", + stat->peer_macaddr.addr, + __le32_to_cpu(stat->last_rx_rate_code), + __le32_to_cpu(stat->last_rx_bitrate_kbps)); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n", + __le32_to_cpu(stat->last_tx_rate_code), + __le32_to_cpu(stat->last_tx_bitrate_kbps)); + + rcu_read_lock(); + sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL); + if (!sta) { + rcu_read_unlock(); + ath10k_warn(ar, "not found station for peer stats\n"); + return -EINVAL; + } + + arsta = (struct ath10k_sta *)sta->drv_priv; + arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code); + arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps); + arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code); + arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps); + rcu_read_unlock(); + + return 0; +} + +static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_tlv_peer_stats_info_ev *ev; + const void *data; + u32 num_peer_stats; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT]; + data = tb[WMI_TLV_TAG_ARRAY_STRUCT]; + + if (!ev || !data) { + kfree(tb); + return -EPROTO; + } + + num_peer_stats = __le32_to_cpu(ev->num_peers); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n", + __le32_to_cpu(ev->vdev_id), + num_peer_stats, + __le32_to_cpu(ev->more_data)); + + ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data), + ath10k_wmi_tlv_parse_peer_stats_info, NULL); + if (ret) + ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret); + + kfree(tb); + return 0; +} + +static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n"); + ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb); + complete(&ar->peer_stats_info_complete); +} + +static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_tlv_diag_data_ev *ev; + const struct wmi_tlv_diag_item *item; + const void *data; + int ret, num_items, len; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]; + data = 
tb[WMI_TLV_TAG_ARRAY_BYTE]; + if (!ev || !data) { + kfree(tb); + return -EPROTO; + } + + num_items = __le32_to_cpu(ev->num_items); + len = ath10k_wmi_tlv_len(data); + + while (num_items--) { + if (len == 0) + break; + if (len < sizeof(*item)) { + ath10k_warn(ar, "failed to parse diag data: can't fit item header\n"); + break; + } + + item = data; + + if (len < sizeof(*item) + __le16_to_cpu(item->len)) { + ath10k_warn(ar, "failed to parse diag data: item is too long\n"); + break; + } + + trace_ath10k_wmi_diag_container(ar, + item->type, + __le32_to_cpu(item->timestamp), + __le32_to_cpu(item->code), + __le16_to_cpu(item->len), + item->payload); + + len -= sizeof(*item); + len -= roundup(__le16_to_cpu(item->len), 4); + + data += sizeof(*item); + data += roundup(__le16_to_cpu(item->len), 4); + } + + if (num_items != -1 || len != 0) + ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n", + num_items, len); + + kfree(tb); + return 0; +} + +static int ath10k_wmi_tlv_event_diag(struct ath10k *ar, + struct sk_buff *skb) +{ + const void **tb; + const void *data; + int ret, len; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + data = tb[WMI_TLV_TAG_ARRAY_BYTE]; + if (!data) { + kfree(tb); + return -EPROTO; + } + len = ath10k_wmi_tlv_len(data); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len); + trace_ath10k_wmi_diag(ar, data, len); + + kfree(tb); + return 0; +} + +static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_tlv_p2p_noa_ev *ev; + const struct wmi_p2p_noa_info *noa; + int ret, vdev_id; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]; + noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]; + + if (!ev || !noa) { + kfree(tb); + return -EPROTO; + } + + vdev_id = __le32_to_cpu(ev->vdev_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv p2p noa vdev_id %i descriptors %u\n", + vdev_id, noa->num_descriptors); + + ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); + kfree(tb); + return 0; +} + +static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_tlv_tx_pause_ev *ev; + int ret, vdev_id; + u32 pause_id, action, vdev_map, peer_id, tid_map; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + pause_id = __le32_to_cpu(ev->pause_id); + action = __le32_to_cpu(ev->action); + vdev_map = __le32_to_cpu(ev->vdev_map); + peer_id = __le32_to_cpu(ev->peer_id); + tid_map = __le32_to_cpu(ev->tid_map); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n", + pause_id, action, vdev_map, peer_id, tid_map); + + switch (pause_id) { + case WMI_TLV_TX_PAUSE_ID_MCC: + case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: + case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: + case WMI_TLV_TX_PAUSE_ID_AP_PS: + case WMI_TLV_TX_PAUSE_ID_IBSS_PS: + for (vdev_id = 0; vdev_map; vdev_id++) { + if (!(vdev_map & BIT(vdev_id))) + continue; + + vdev_map &= ~BIT(vdev_id); + 
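+ /* apply the pause action to this vdev's tx queues */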
ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id, + action); + } + break; + case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: + case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: + case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: + case WMI_TLV_TX_PAUSE_ID_HOST: + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac ignoring unsupported tx pause id %d\n", + pause_id); + break; + default: + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac ignoring unknown tx pause vdev %d\n", + pause_id); + break; + } + + kfree(tb); + return 0; +} + +static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar, + struct sk_buff *skb) +{ + const struct wmi_tlv_rfkill_state_change_ev *ev; + const void **tb; + bool radio; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, + "failed to parse rfkill state change event: %d\n", + ret); + return; + } + + ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT]; + if (!ev) { + kfree(tb); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "wmi tlv rfkill state change gpio %d type %d radio_state %d\n", + __le32_to_cpu(ev->gpio_pin_num), + __le32_to_cpu(ev->int_type), + __le32_to_cpu(ev->radio_state)); + + radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON); + + spin_lock_bh(&ar->data_lock); + + if (!radio) + ar->hw_rfkill_on = true; + + spin_unlock_bh(&ar->data_lock); + + /* notify cfg80211 radio state change */ + ath10k_mac_rfkill_enable_radio(ar, radio); + wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio); +} + +static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar, + struct sk_buff *skb) +{ + const struct wmi_tlv_pdev_temperature_event *ev; + + ev = (struct wmi_tlv_pdev_temperature_event *)skb->data; + if (WARN_ON(skb->len < sizeof(*ev))) + return -EPROTO; + + ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature)); + return 0; +} + +static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb) +{ + struct ieee80211_sta *station; + const struct wmi_tlv_tdls_peer_event *ev; + const void **tb; + struct ath10k_vif *arvif; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ath10k_warn(ar, "tdls peer failed to parse tlv"); + return; + } + ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT]; + if (!ev) { + kfree(tb); + ath10k_warn(ar, "tdls peer NULL event"); + return; + } + + switch (__le32_to_cpu(ev->peer_reason)) { + case WMI_TDLS_TEARDOWN_REASON_TX: + case WMI_TDLS_TEARDOWN_REASON_RSSI: + case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT: + rcu_read_lock(); + station = ieee80211_find_sta_by_ifaddr(ar->hw, + ev->peer_macaddr.addr, + NULL); + if (!station) { + ath10k_warn(ar, "did not find station from tdls peer event"); + goto exit; + } + + arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id)); + if (!arvif) { + ath10k_warn(ar, "no vif for vdev_id %d found", + __le32_to_cpu(ev->vdev_id)); + goto exit; + } + + ieee80211_tdls_oper_request( + arvif->vif, station->addr, + NL80211_TDLS_TEARDOWN, + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE, + GFP_ATOMIC + ); + break; + default: + kfree(tb); + return; + } + +exit: + rcu_read_unlock(); + kfree(tb); +} + +static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar, + struct sk_buff *skb) +{ + struct wmi_peer_delete_resp_ev_arg *arg; + struct wmi_tlv *tlv_hdr; + + tlv_hdr = (struct wmi_tlv *)skb->data; + arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value; + + ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d", arg->vdev_id); + ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM", &arg->peer_addr); + 
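+ /* debug output only; the fields are read straight from the + * TLV payload. + */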
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n"); + + complete(&ar->peer_delete_done); + + return 0; +} + +/***********/ +/* TLV ops */ +/***********/ + +static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_tlv_event_id id; + bool consumed; + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); + + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) + goto out; + + trace_ath10k_wmi_event(ar, id, skb->data, skb->len); + + consumed = ath10k_tm_event_wmi(ar, id, skb); + + /* Ready event must be handled normally also in UTF mode so that we + * know the UTF firmware has booted, others we are just bypass WMI + * events to testmode. + */ + if (consumed && id != WMI_TLV_READY_EVENTID) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv testmode consumed 0x%x\n", id); + goto out; + } + + switch (id) { + case WMI_TLV_MGMT_RX_EVENTID: + ath10k_wmi_event_mgmt_rx(ar, skb); + /* mgmt_rx() owns the skb now! */ + return; + case WMI_TLV_SCAN_EVENTID: + ath10k_wmi_event_scan(ar, skb); + break; + case WMI_TLV_CHAN_INFO_EVENTID: + ath10k_wmi_event_chan_info(ar, skb); + break; + case WMI_TLV_ECHO_EVENTID: + ath10k_wmi_event_echo(ar, skb); + break; + case WMI_TLV_DEBUG_MESG_EVENTID: + ath10k_wmi_event_debug_mesg(ar, skb); + break; + case WMI_TLV_UPDATE_STATS_EVENTID: + ath10k_wmi_event_update_stats(ar, skb); + break; + case WMI_TLV_PEER_STATS_INFO_EVENTID: + ath10k_wmi_tlv_event_peer_stats_info(ar, skb); + break; + case WMI_TLV_VDEV_START_RESP_EVENTID: + ath10k_wmi_event_vdev_start_resp(ar, skb); + break; + case WMI_TLV_VDEV_STOPPED_EVENTID: + ath10k_wmi_event_vdev_stopped(ar, skb); + break; + case WMI_TLV_VDEV_DELETE_RESP_EVENTID: + ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb); + break; + case WMI_TLV_PEER_STA_KICKOUT_EVENTID: + ath10k_wmi_event_peer_sta_kickout(ar, skb); + break; + case WMI_TLV_HOST_SWBA_EVENTID: + ath10k_wmi_event_host_swba(ar, skb); + break; + case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID: + ath10k_wmi_event_tbttoffset_update(ar, skb); + break; + case WMI_TLV_PHYERR_EVENTID: + ath10k_wmi_event_phyerr(ar, skb); + break; + case WMI_TLV_ROAM_EVENTID: + ath10k_wmi_event_roam(ar, skb); + break; + case WMI_TLV_PROFILE_MATCH: + ath10k_wmi_event_profile_match(ar, skb); + break; + case WMI_TLV_DEBUG_PRINT_EVENTID: + ath10k_wmi_event_debug_print(ar, skb); + break; + case WMI_TLV_PDEV_QVIT_EVENTID: + ath10k_wmi_event_pdev_qvit(ar, skb); + break; + case WMI_TLV_WLAN_PROFILE_DATA_EVENTID: + ath10k_wmi_event_wlan_profile_data(ar, skb); + break; + case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_rtt_measurement_report(ar, skb); + break; + case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_tsf_measurement_report(ar, skb); + break; + case WMI_TLV_RTT_ERROR_REPORT_EVENTID: + ath10k_wmi_event_rtt_error_report(ar, skb); + break; + case WMI_TLV_WOW_WAKEUP_HOST_EVENTID: + ath10k_wmi_event_wow_wakeup_host(ar, skb); + break; + case WMI_TLV_DCS_INTERFERENCE_EVENTID: + ath10k_wmi_event_dcs_interference(ar, skb); + break; + case WMI_TLV_PDEV_TPC_CONFIG_EVENTID: + ath10k_wmi_event_pdev_tpc_config(ar, skb); + break; + case WMI_TLV_PDEV_FTM_INTG_EVENTID: + ath10k_wmi_event_pdev_ftm_intg(ar, skb); + break; + case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID: + ath10k_wmi_event_gtk_offload_status(ar, skb); + break; + case WMI_TLV_GTK_REKEY_FAIL_EVENTID: + ath10k_wmi_event_gtk_rekey_fail(ar, skb); + break; + case WMI_TLV_TX_DELBA_COMPLETE_EVENTID: + 
ath10k_wmi_event_delba_complete(ar, skb); + break; + case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID: + ath10k_wmi_event_addba_complete(ar, skb); + break; + case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID: + ath10k_wmi_event_vdev_install_key_complete(ar, skb); + break; + case WMI_TLV_SERVICE_READY_EVENTID: + ath10k_wmi_event_service_ready(ar, skb); + return; + case WMI_TLV_READY_EVENTID: + ath10k_wmi_event_ready(ar, skb); + break; + case WMI_TLV_SERVICE_AVAILABLE_EVENTID: + ath10k_wmi_event_service_available(ar, skb); + break; + case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID: + ath10k_wmi_tlv_event_bcn_tx_status(ar, skb); + break; + case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID: + ath10k_wmi_tlv_event_diag_data(ar, skb); + break; + case WMI_TLV_DIAG_EVENTID: + ath10k_wmi_tlv_event_diag(ar, skb); + break; + case WMI_TLV_P2P_NOA_EVENTID: + ath10k_wmi_tlv_event_p2p_noa(ar, skb); + break; + case WMI_TLV_TX_PAUSE_EVENTID: + ath10k_wmi_tlv_event_tx_pause(ar, skb); + break; + case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID: + ath10k_wmi_tlv_event_rfkill_state_change(ar, skb); + break; + case WMI_TLV_PDEV_TEMPERATURE_EVENTID: + ath10k_wmi_tlv_event_temperature(ar, skb); + break; + case WMI_TLV_TDLS_PEER_EVENTID: + ath10k_wmi_event_tdls_peer(ar, skb); + break; + case WMI_TLV_PEER_DELETE_RESP_EVENTID: + ath10k_wmi_tlv_event_peer_delete_resp(ar, skb); + break; + case WMI_TLV_MGMT_TX_COMPLETION_EVENTID: + ath10k_wmi_event_mgmt_tx_compl(ar, skb); + break; + case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID: + ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb); + break; + default: + ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id); + break; + } + +out: + dev_kfree_skb(skb); +} + +static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_scan_ev_arg *arg) +{ + const void **tb; + const struct wmi_scan_event *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + arg->event_type = ev->event_type; + arg->reason = ev->reason; + arg->channel_freq = ev->channel_freq; + arg->scan_req_id = ev->scan_req_id; + arg->scan_id = ev->scan_id; + arg->vdev_id = ev->vdev_id; + + kfree(tb); + return 0; +} + +static int +ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_compl_ev_arg *arg) +{ + const void **tb; + const struct wmi_tlv_mgmt_tx_compl_ev *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + arg->desc_id = ev->desc_id; + arg->status = ev->status; + arg->pdev_id = ev->pdev_id; + arg->ppdu_id = ev->ppdu_id; + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + arg->ack_rssi = ev->ack_rssi; + + kfree(tb); + return 0; +} + +struct wmi_tlv_tx_bundle_compl_parse { + const __le32 *num_reports; + const __le32 *desc_ids; + const __le32 *status; + const __le32 *ppdu_ids; + const __le32 *ack_rssi; + bool desc_ids_done; + bool status_done; + bool ppdu_ids_done; + bool ack_rssi_done; +}; + +static int +ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_tx_bundle_compl_parse 
*bundle_tx_compl = data; + + switch (tag) { + case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT: + bundle_tx_compl->num_reports = ptr; + break; + case WMI_TLV_TAG_ARRAY_UINT32: + if (!bundle_tx_compl->desc_ids_done) { + bundle_tx_compl->desc_ids_done = true; + bundle_tx_compl->desc_ids = ptr; + } else if (!bundle_tx_compl->status_done) { + bundle_tx_compl->status_done = true; + bundle_tx_compl->status = ptr; + } else if (!bundle_tx_compl->ppdu_ids_done) { + bundle_tx_compl->ppdu_ids_done = true; + bundle_tx_compl->ppdu_ids = ptr; + } else if (!bundle_tx_compl->ack_rssi_done) { + bundle_tx_compl->ack_rssi_done = true; + bundle_tx_compl->ack_rssi = ptr; + } + break; + default: + break; + } + return 0; +} + +static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev( + struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg) +{ + struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { }; + int ret; + + ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, + ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse, + &bundle_tx_compl); + if (ret) { + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids || + !bundle_tx_compl.status) + return -EPROTO; + + arg->num_reports = *bundle_tx_compl.num_reports; + arg->desc_ids = bundle_tx_compl.desc_ids; + arg->status = bundle_tx_compl.status; + arg->ppdu_ids = bundle_tx_compl.ppdu_ids; + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + arg->ack_rssi = bundle_tx_compl.ack_rssi; + + return 0; +} + +static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_mgmt_rx_ev_arg *arg) +{ + const void **tb; + const struct wmi_tlv_mgmt_rx_ev *ev; + const u8 *frame; + u32 msdu_len; + int ret, i; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]; + frame = tb[WMI_TLV_TAG_ARRAY_BYTE]; + + if (!ev || !frame) { + kfree(tb); + return -EPROTO; + } + + arg->channel = ev->channel; + arg->buf_len = ev->buf_len; + arg->status = ev->status; + arg->snr = ev->snr; + arg->phy_mode = ev->phy_mode; + arg->rate = ev->rate; + + for (i = 0; i < ARRAY_SIZE(ev->rssi); i++) + arg->rssi[i] = ev->rssi[i]; + + msdu_len = __le32_to_cpu(arg->buf_len); + + if (skb->len < (frame - skb->data) + msdu_len) { + kfree(tb); + return -EPROTO; + } + + /* shift the sk_buff to point to `frame` */ + skb_trim(skb, 0); + skb_put(skb, frame - skb->data); + skb_pull(skb, frame - skb->data); + skb_put(skb, msdu_len); + + kfree(tb); + return 0; +} + +static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_ch_info_ev_arg *arg) +{ + const void **tb; + const struct wmi_tlv_chan_info_event *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + arg->err_code = ev->err_code; + arg->freq = ev->freq; + arg->cmd_flags = ev->cmd_flags; + arg->noise_floor = ev->noise_floor; + arg->rx_clear_count = ev->rx_clear_count; + arg->cycle_count = ev->cycle_count; + if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL, + ar->running_fw->fw_file.fw_features)) + arg->mac_clk_mhz = ev->mac_clk_mhz; + + 
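+ /* mac_clk_mhz above is reported only by firmware advertising + * the single-chan-info-per-channel feature. + */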
kfree(tb); + return 0; +} + +static int +ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_vdev_start_ev_arg *arg) +{ + const void **tb; + const struct wmi_vdev_start_response_event *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + skb_pull(skb, sizeof(*ev)); + arg->vdev_id = ev->vdev_id; + arg->req_id = ev->req_id; + arg->resp_type = ev->resp_type; + arg->status = ev->status; + + kfree(tb); + return 0; +} + +static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_peer_kick_ev_arg *arg) +{ + const void **tb; + const struct wmi_peer_sta_kickout_event *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + arg->mac_addr = ev->peer_macaddr.addr; + + kfree(tb); + return 0; +} + +struct wmi_tlv_swba_parse { + const struct wmi_host_swba_event *ev; + bool tim_done; + bool noa_done; + size_t n_tim; + size_t n_noa; + struct wmi_swba_ev_arg *arg; +}; + +static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_swba_parse *swba = data; + struct wmi_tim_info_arg *tim_info_arg; + const struct wmi_tim_info *tim_info_ev = ptr; + + if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO) + return -EPROTO; + + if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info)) + return -ENOBUFS; + + if (__le32_to_cpu(tim_info_ev->tim_len) > + sizeof(tim_info_ev->tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } + + tim_info_arg = &swba->arg->tim_info[swba->n_tim]; + tim_info_arg->tim_len = tim_info_ev->tim_len; + tim_info_arg->tim_mcast = tim_info_ev->tim_mcast; + tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap; + tim_info_arg->tim_changed = tim_info_ev->tim_changed; + tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending; + + swba->n_tim++; + + return 0; +} + +static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_swba_parse *swba = data; + + if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO) + return -EPROTO; + + if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info)) + return -ENOBUFS; + + swba->arg->noa_info[swba->n_noa++] = ptr; + return 0; +} + +static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_swba_parse *swba = data; + int ret; + + switch (tag) { + case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT: + swba->ev = ptr; + break; + case WMI_TLV_TAG_ARRAY_STRUCT: + if (!swba->tim_done) { + swba->tim_done = true; + ret = ath10k_wmi_tlv_iter(ar, ptr, len, + ath10k_wmi_tlv_swba_tim_parse, + swba); + if (ret) + return ret; + } else if (!swba->noa_done) { + swba->noa_done = true; + ret = ath10k_wmi_tlv_iter(ar, ptr, len, + ath10k_wmi_tlv_swba_noa_parse, + swba); + if (ret) + return ret; + } + break; + default: + break; + } + return 0; +} + +static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_swba_ev_arg *arg) +{ + struct wmi_tlv_swba_parse swba 
= { .arg = arg }; + u32 map; + size_t n_vdevs; + int ret; + + ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, + ath10k_wmi_tlv_swba_parse, &swba); + if (ret) { + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + if (!swba.ev) + return -EPROTO; + + arg->vdev_map = swba.ev->vdev_map; + + for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1) + if (map & BIT(0)) + n_vdevs++; + + if (n_vdevs != swba.n_tim || + n_vdevs != swba.n_noa) + return -EPROTO; + + return 0; +} + +static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_phyerr_hdr_arg *arg) +{ + const void **tb; + const struct wmi_tlv_phyerr_ev *ev; + const void *phyerrs; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR]; + phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE]; + + if (!ev || !phyerrs) { + kfree(tb); + return -EPROTO; + } + + arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs); + arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32); + arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32); + arg->buf_len = __le32_to_cpu(ev->buf_len); + arg->phyerrs = phyerrs; + + kfree(tb); + return 0; +} + +#define WMI_TLV_ABI_VER_NS0 0x5F414351 +#define WMI_TLV_ABI_VER_NS1 0x00004C4D +#define WMI_TLV_ABI_VER_NS2 0x00000000 +#define WMI_TLV_ABI_VER_NS3 0x00000000 + +#define WMI_TLV_ABI_VER0_MAJOR 1 +#define WMI_TLV_ABI_VER0_MINOR 0 +#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \ + (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF)) +#define WMI_TLV_ABI_VER1 53 + +static int +ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_svc_rdy_ev_arg *arg = data; + int i; + + if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ) + return -EPROTO; + + for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) { + if (!arg->mem_reqs[i]) { + arg->mem_reqs[i] = ptr; + return 0; + } + } + + return -ENOMEM; +} + +struct wmi_tlv_svc_rdy_parse { + const struct hal_reg_capabilities *reg; + const struct wmi_tlv_svc_rdy_ev *ev; + const __le32 *svc_bmap; + const struct wlan_host_mem_req *mem_reqs; + bool svc_bmap_done; + bool dbs_hw_mode_done; +}; + +static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_svc_rdy_parse *svc_rdy = data; + + switch (tag) { + case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT: + svc_rdy->ev = ptr; + break; + case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES: + svc_rdy->reg = ptr; + break; + case WMI_TLV_TAG_ARRAY_STRUCT: + svc_rdy->mem_reqs = ptr; + break; + case WMI_TLV_TAG_ARRAY_UINT32: + if (!svc_rdy->svc_bmap_done) { + svc_rdy->svc_bmap_done = true; + svc_rdy->svc_bmap = ptr; + } else if (!svc_rdy->dbs_hw_mode_done) { + svc_rdy->dbs_hw_mode_done = true; + } + break; + default: + break; + } + return 0; +} + +static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_svc_rdy_ev_arg *arg) +{ + const struct hal_reg_capabilities *reg; + const struct wmi_tlv_svc_rdy_ev *ev; + const __le32 *svc_bmap; + const struct wlan_host_mem_req *mem_reqs; + struct wmi_tlv_svc_rdy_parse svc_rdy = { }; + int ret; + + ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, + ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy); + if (ret) { + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = svc_rdy.ev; + reg = svc_rdy.reg; + 
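+ /* all four TLVs must be present, see the check below */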
svc_bmap = svc_rdy.svc_bmap; + mem_reqs = svc_rdy.mem_reqs; + + if (!ev || !reg || !svc_bmap || !mem_reqs) + return -EPROTO; + + /* This is an internal ABI compatibility check for WMI TLV so check it + * here instead of the generic WMI code. + */ + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n", + __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0, + __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0, + __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1, + __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2, + __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3); + + if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 || + __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 || + __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 || + __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 || + __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) { + return -EOPNOTSUPP; + } + + arg->min_tx_power = ev->hw_min_tx_power; + arg->max_tx_power = ev->hw_max_tx_power; + arg->ht_cap = ev->ht_cap_info; + arg->vht_cap = ev->vht_cap_info; + arg->vht_supp_mcs = ev->vht_supp_mcs; + arg->sw_ver0 = ev->abi.abi_ver0; + arg->sw_ver1 = ev->abi.abi_ver1; + arg->fw_build = ev->fw_build_vers; + arg->phy_capab = ev->phy_capability; + arg->num_rf_chains = ev->num_rf_chains; + arg->eeprom_rd = reg->eeprom_rd; + arg->low_2ghz_chan = reg->low_2ghz_chan; + arg->high_2ghz_chan = reg->high_2ghz_chan; + arg->low_5ghz_chan = reg->low_5ghz_chan; + arg->high_5ghz_chan = reg->high_5ghz_chan; + arg->num_mem_reqs = ev->num_mem_reqs; + arg->service_map = svc_bmap; + arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap); + arg->sys_cap_info = ev->sys_cap_info; + + ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs), + ath10k_wmi_tlv_parse_mem_reqs, arg); + if (ret) { + ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret); + return ret; + } + + return 0; +} + +static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_rdy_ev_arg *arg) +{ + const void **tb; + const struct wmi_tlv_rdy_ev *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + arg->sw_version = ev->abi.abi_ver0; + arg->abi_version = ev->abi.abi_ver1; + arg->status = ev->status; + arg->mac_addr = ev->mac_addr.addr; + + kfree(tb); + return 0; +} + +static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_svc_avail_ev_arg *arg = data; + + switch (tag) { + case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT: + arg->service_map_ext_valid = true; + arg->service_map_ext_len = *(__le32 *)ptr; + arg->service_map_ext = ptr + sizeof(__le32); + return 0; + default: + break; + } + + return 0; +} + +static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_svc_avail_ev_arg *arg) +{ + int ret; + + ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, + ath10k_wmi_tlv_svc_avail_parse, arg); + + if (ret) { + ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret); + return ret; + } + + return 0; +} + +static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src, + struct ath10k_fw_stats_vdev *dst) +{ + int i; + + dst->vdev_id = 
__le32_to_cpu(src->vdev_id); + dst->beacon_snr = __le32_to_cpu(src->beacon_snr); + dst->data_snr = __le32_to_cpu(src->data_snr); + dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames); + dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail); + dst->num_rts_success = __le32_to_cpu(src->num_rts_success); + dst->num_rx_err = __le32_to_cpu(src->num_rx_err); + dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard); + dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked); + + for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++) + dst->num_tx_frames[i] = + __le32_to_cpu(src->num_tx_frames[i]); + + for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++) + dst->num_tx_frames_retries[i] = + __le32_to_cpu(src->num_tx_frames_retries[i]); + + for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++) + dst->num_tx_frames_failures[i] = + __le32_to_cpu(src->num_tx_frames_failures[i]); + + for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++) + dst->tx_rate_history[i] = + __le32_to_cpu(src->tx_rate_history[i]); + + for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++) + dst->beacon_rssi_history[i] = + __le32_to_cpu(src->beacon_rssi_history[i]); +} + +static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, + struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + const void **tb; + const struct wmi_tlv_stats_ev *ev; + u32 num_peer_stats_extd; + const void *data; + u32 num_pdev_stats; + u32 num_vdev_stats; + u32 num_peer_stats; + u32 num_bcnflt_stats; + u32 num_chan_stats; + size_t data_len; + u32 stats_id; + int ret; + int i; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT]; + data = tb[WMI_TLV_TAG_ARRAY_BYTE]; + + if (!ev || !data) { + kfree(tb); + return -EPROTO; + } + + data_len = ath10k_wmi_tlv_len(data); + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); + num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats); + num_chan_stats = __le32_to_cpu(ev->num_chan_stats); + stats_id = __le32_to_cpu(ev->stats_id); + num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n", + num_pdev_stats, num_vdev_stats, num_peer_stats, + num_bcnflt_stats, num_chan_stats, num_peer_stats_extd); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = data; + if (data_len < sizeof(*src)) { + kfree(tb); + return -EPROTO; + } + + data += sizeof(*src); + data_len -= sizeof(*src); + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats_base(&src->base, dst); + ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); + ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); + list_add_tail(&dst->list, &stats->pdevs); + } + + for (i = 0; i < num_vdev_stats; i++) { + const struct wmi_tlv_vdev_stats *src; + struct ath10k_fw_stats_vdev *dst; + + src = data; + if (data_len < sizeof(*src)) { + kfree(tb); + return -EPROTO; + } + + data += sizeof(*src); + data_len -= sizeof(*src); + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_tlv_pull_vdev_stats(src, dst); + list_add_tail(&dst->list, &stats->vdevs); + } + + for (i = 0; i < num_peer_stats; i++) { + const struct 
wmi_10x_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + + src = data; + if (data_len < sizeof(*src)) { + kfree(tb); + return -EPROTO; + } + + data += sizeof(*src); + data_len -= sizeof(*src); + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_peer_stats(&src->old, dst); + dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); + + if (stats_id & WMI_TLV_STAT_PEER_EXTD) { + const struct wmi_tlv_peer_stats_extd *extd; + unsigned long rx_duration_high; + + extd = data + sizeof(*src) * (num_peer_stats - i - 1) + + sizeof(*extd) * i; + + dst->rx_duration = __le32_to_cpu(extd->rx_duration); + rx_duration_high = __le32_to_cpu + (extd->rx_duration_high); + + if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT, + &rx_duration_high)) { + rx_duration_high = + FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK, + rx_duration_high); + dst->rx_duration |= (u64)rx_duration_high << + WMI_TLV_PEER_RX_DURATION_SHIFT; + } + } + + list_add_tail(&dst->list, &stats->peers); + } + + kfree(tb); + return 0; +} + +static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_roam_ev_arg *arg) +{ + const void **tb; + const struct wmi_tlv_roam_ev *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + arg->vdev_id = ev->vdev_id; + arg->reason = ev->reason; + arg->rssi = ev->rssi; + + kfree(tb); + return 0; +} + +static int +ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_wow_ev_arg *arg) +{ + const void **tb; + const struct wmi_tlv_wow_event_info *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + arg->vdev_id = __le32_to_cpu(ev->vdev_id); + arg->flag = __le32_to_cpu(ev->flag); + arg->wake_reason = __le32_to_cpu(ev->wake_reason); + arg->data_len = __le32_to_cpu(ev->data_len); + + kfree(tb); + return 0; +} + +static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_echo_ev_arg *arg) +{ + const void **tb; + const struct wmi_echo_event *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } + + arg->value = ev->value; + + kfree(tb); + return 0; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt) +{ + struct wmi_tlv_pdev_suspend *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->opt = __cpu_to_le32(opt); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar) +{ + struct wmi_tlv_resume_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + 
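/* The resume command carries no parameters beyond a reserved word, so the + * buffer is just one TLV header plus the fixed command struct. + */ +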
skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->reserved = __cpu_to_le32(0); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar, + u16 rd, u16 rd2g, u16 rd5g, + u16 ctl2g, u16 ctl5g, + enum wmi_dfs_region dfs_reg) +{ + struct wmi_tlv_pdev_set_rd_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->regd = __cpu_to_le32(rd); + cmd->regd_2ghz = __cpu_to_le32(rd2g); + cmd->regd_5ghz = __cpu_to_le32(rd5g); + cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g); + cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n"); + return skb; +} + +static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar) +{ + return WMI_TXBF_CONF_AFTER_ASSOC; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id, + u32 param_value) +{ + struct wmi_tlv_pdev_set_param_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->param_id = __cpu_to_le32(param_id); + cmd->param_value = __cpu_to_le32(param_value); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n", + param_id, param_value); + return skb; +} + +static void +ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks) +{ + struct host_memory_chunk_tlv *chunk; + struct wmi_tlv *tlv; + dma_addr_t paddr; + int i; + __le16 tlv_len, tlv_tag; + + tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK); + tlv_len = __cpu_to_le16(sizeof(*chunk)); + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + tlv = host_mem_chunks; + tlv->tag = tlv_tag; + tlv->len = tlv_len; + chunk = (void *)tlv->value; + + chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); + chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); + chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + + if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS, + ar->wmi.svc_map)) { + paddr = ar->wmi.mem_chunks[i].paddr; + chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr)); + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n", + i, + ar->wmi.mem_chunks[i].len, + (unsigned long long)ar->wmi.mem_chunks[i].paddr, + ar->wmi.mem_chunks[i].req_id); + + host_mem_chunks += sizeof(*tlv); + host_mem_chunks += sizeof(*chunk); + } +} + +static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) +{ + struct sk_buff *skb; + struct wmi_tlv *tlv; + struct wmi_tlv_init_cmd *cmd; + struct wmi_tlv_resource_config *cfg; + void *chunks; + size_t len, chunks_len; + void *ptr; + + chunks_len = ar->wmi.num_mem_chunks * + (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv)); + len = (sizeof(*tlv) + sizeof(*cmd)) + + (sizeof(*tlv) + sizeof(*cfg)) + + 
(sizeof(*tlv) + chunks_len); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = skb->data; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG); + tlv->len = __cpu_to_le16(sizeof(*cfg)); + cfg = (void *)tlv->value; + ptr += sizeof(*tlv); + ptr += sizeof(*cfg); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(chunks_len); + chunks = (void *)tlv->value; + + ptr += sizeof(*tlv); + ptr += chunks_len; + + cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0); + cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1); + cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0); + cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1); + cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2); + cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3); + cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); + + cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); + + if (ar->hw_params.num_peers) + cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers); + else + cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS); + cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit); + cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries); + + if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) { + cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); + cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); + } else { + cfg->num_offload_peers = __cpu_to_le32(0); + cfg->num_offload_reorder_bufs = __cpu_to_le32(0); + } + + cfg->num_peer_keys = __cpu_to_le32(2); + if (ar->hw_params.num_peers) + cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2); + else + cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS); + cfg->tx_chain_mask = __cpu_to_le32(0x7); + cfg->rx_chain_mask = __cpu_to_le32(0x7); + cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64); + cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64); + cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64); + cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28); + cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); + cfg->scan_max_pending_reqs = __cpu_to_le32(4); + cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); + cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); + cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8); + cfg->num_mcast_groups = __cpu_to_le32(0); + cfg->num_mcast_table_elems = __cpu_to_le32(0); + cfg->mcast2ucast_mode = __cpu_to_le32(0); + cfg->tx_dbg_log_size = __cpu_to_le32(0x400); + cfg->dma_burst_size = __cpu_to_le32(0); + cfg->mac_aggr_delim = __cpu_to_le32(0); + cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0); + cfg->vow_config = __cpu_to_le32(0); + cfg->gtk_offload_max_vdev = __cpu_to_le32(2); + cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx); + cfg->max_frag_entries = __cpu_to_le32(2); + cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS); + cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20); + cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2); + cfg->num_multicast_filter_entries = __cpu_to_le32(5); + cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns); + cfg->num_keep_alive_pattern = __cpu_to_le32(6); + cfg->keep_alive_pattern_size = __cpu_to_le32(0); + cfg->max_tdls_concurrent_sleep_sta = 
__cpu_to_le32(1); + cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1); + cfg->wmi_send_separate = __cpu_to_le32(0); + cfg->num_ocb_vdevs = __cpu_to_le32(0); + cfg->num_ocb_channels = __cpu_to_le32(0); + cfg->num_ocb_schedules = __cpu_to_le32(0); + cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL); + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI); + + ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar, + const struct wmi_start_scan_arg *arg) +{ + struct wmi_tlv_start_scan_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len, chan_len, ssid_len, bssid_len, ie_len; + __le32 *chans; + struct wmi_ssid *ssids; + struct wmi_mac_addr *addrs; + void *ptr; + int i, ret; + + ret = ath10k_wmi_start_scan_verify(arg); + if (ret) + return ERR_PTR(ret); + + chan_len = arg->n_channels * sizeof(__le32); + ssid_len = arg->n_ssids * sizeof(struct wmi_ssid); + bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr); + ie_len = roundup(arg->ie_len, 4); + len = (sizeof(*tlv) + sizeof(*cmd)) + + sizeof(*tlv) + chan_len + + sizeof(*tlv) + ssid_len + + sizeof(*tlv) + bssid_len + + sizeof(*tlv) + ie_len; + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + ath10k_wmi_put_start_scan_common(&cmd->common, arg); + cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms); + cmd->num_channels = __cpu_to_le32(arg->n_channels); + cmd->num_ssids = __cpu_to_le32(arg->n_ssids); + cmd->num_bssids = __cpu_to_le32(arg->n_bssids); + cmd->ie_len = __cpu_to_le32(arg->ie_len); + cmd->num_probes = __cpu_to_le32(3); + ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr); + ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr); + + /* FIXME: There are some scan flag inconsistencies across firmwares, + * e.g. WMI-TLV inverts the logic behind the following flag. 
+ */ + cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); + tlv->len = __cpu_to_le16(chan_len); + chans = (void *)tlv->value; + for (i = 0; i < arg->n_channels; i++) + chans[i] = __cpu_to_le32(arg->channels[i]); + + ptr += sizeof(*tlv); + ptr += chan_len; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT); + tlv->len = __cpu_to_le16(ssid_len); + ssids = (void *)tlv->value; + for (i = 0; i < arg->n_ssids; i++) { + ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len); + memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len); + } + + ptr += sizeof(*tlv); + ptr += ssid_len; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT); + tlv->len = __cpu_to_le16(bssid_len); + addrs = (void *)tlv->value; + for (i = 0; i < arg->n_bssids; i++) + ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid); + + ptr += sizeof(*tlv); + ptr += bssid_len; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); + tlv->len = __cpu_to_le16(ie_len); + memcpy(tlv->value, arg->ie, arg->ie_len); + + ptr += sizeof(*tlv); + ptr += ie_len; + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar, + const struct wmi_stop_scan_arg *arg) +{ + struct wmi_stop_scan_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u32 scan_id; + u32 req_id; + + if (arg->req_id > 0xFFF) + return ERR_PTR(-EINVAL); + if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) + return ERR_PTR(-EINVAL); + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + scan_id = arg->u.scan_id; + scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX; + + req_id = arg->req_id; + req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->req_type = __cpu_to_le32(arg->req_type); + cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id); + cmd->scan_id = __cpu_to_le32(scan_id); + cmd->scan_req_id = __cpu_to_le32(req_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n"); + return skb; +} + +static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype) +{ + switch (subtype) { + case WMI_VDEV_SUBTYPE_NONE: + return WMI_TLV_VDEV_SUBTYPE_NONE; + case WMI_VDEV_SUBTYPE_P2P_DEVICE: + return WMI_TLV_VDEV_SUBTYPE_P2P_DEV; + case WMI_VDEV_SUBTYPE_P2P_CLIENT: + return WMI_TLV_VDEV_SUBTYPE_P2P_CLI; + case WMI_VDEV_SUBTYPE_P2P_GO: + return WMI_TLV_VDEV_SUBTYPE_P2P_GO; + case WMI_VDEV_SUBTYPE_PROXY_STA: + return WMI_TLV_VDEV_SUBTYPE_PROXY_STA; + case WMI_VDEV_SUBTYPE_MESH_11S: + return WMI_TLV_VDEV_SUBTYPE_MESH_11S; + case WMI_VDEV_SUBTYPE_MESH_NON_11S: + return -EOPNOTSUPP; + } + return -EOPNOTSUPP; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar, + u32 vdev_id, + enum wmi_vdev_type vdev_type, + enum wmi_vdev_subtype vdev_subtype, + const u8 mac_addr[ETH_ALEN]) +{ + struct wmi_vdev_create_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id 
= __cpu_to_le32(vdev_id); + cmd->vdev_type = __cpu_to_le32(vdev_type); + cmd->vdev_subtype = __cpu_to_le32(vdev_subtype); + ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id) +{ + struct wmi_vdev_delete_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *arg, + bool restart) +{ + struct wmi_tlv_vdev_start_cmd *cmd; + struct wmi_channel *ch; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len; + void *ptr; + u32 flags = 0; + + if (WARN_ON(arg->hidden_ssid && !arg->ssid)) + return ERR_PTR(-EINVAL); + if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) + return ERR_PTR(-EINVAL); + + len = (sizeof(*tlv) + sizeof(*cmd)) + + (sizeof(*tlv) + sizeof(*ch)) + + (sizeof(*tlv) + 0); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + if (arg->hidden_ssid) + flags |= WMI_VDEV_START_HIDDEN_SSID; + if (arg->pmf_enabled) + flags |= WMI_VDEV_START_PMF_ENABLED; + + ptr = (void *)skb->data; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval); + cmd->dtim_period = __cpu_to_le32(arg->dtim_period); + cmd->flags = __cpu_to_le32(flags); + cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate); + cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power); + cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack); + + if (arg->ssid) { + cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len); + memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); + } + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL); + tlv->len = __cpu_to_le16(sizeof(*ch)); + ch = (void *)tlv->value; + ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel); + + ptr += sizeof(*tlv); + ptr += sizeof(*ch); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = 0; + + /* Note: This is a nested TLV containing: + * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv].. 
+ */ + + ptr += sizeof(*tlv); + ptr += 0; + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id) +{ + struct wmi_vdev_stop_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, + const u8 *bssid) + +{ + struct wmi_vdev_up_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->vdev_assoc_id = __cpu_to_le32(aid); + ether_addr_copy(cmd->vdev_bssid.addr, bssid); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id) +{ + struct wmi_vdev_down_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id, + u32 param_id, u32 param_value) +{ + struct wmi_vdev_set_param_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->param_id = __cpu_to_le32(param_id); + cmd->param_value = __cpu_to_le32(param_value); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n", + vdev_id, param_id, param_value); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar, + const struct wmi_vdev_install_key_arg *arg) +{ + struct wmi_vdev_install_key_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len; + void *ptr; + + if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] && + arg->key_data) + return ERR_PTR(-EINVAL); + if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] && + !arg->key_data) + return ERR_PTR(-EINVAL); + + len = sizeof(*tlv) + sizeof(*cmd) + + sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32)); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->key_idx = 
__cpu_to_le32(arg->key_idx); + cmd->key_flags = __cpu_to_le32(arg->key_flags); + cmd->key_cipher = __cpu_to_le32(arg->key_cipher); + cmd->key_len = __cpu_to_le32(arg->key_len); + cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len); + cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); + + if (arg->macaddr) + ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); + tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32))); + if (arg->key_data) + memcpy(tlv->value, arg->key_data, arg->key_len); + + ptr += sizeof(*tlv); + ptr += roundup(arg->key_len, sizeof(__le32)); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n"); + return skb; +} + +static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr, + const struct wmi_sta_uapsd_auto_trig_arg *arg) +{ + struct wmi_sta_uapsd_auto_trig_param *ac; + struct wmi_tlv *tlv; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM); + tlv->len = __cpu_to_le16(sizeof(*ac)); + ac = (void *)tlv->value; + + ac->wmm_ac = __cpu_to_le32(arg->wmm_ac); + ac->user_priority = __cpu_to_le32(arg->user_priority); + ac->service_interval = __cpu_to_le32(arg->service_interval); + ac->suspend_interval = __cpu_to_le32(arg->suspend_interval); + ac->delay_interval = __cpu_to_le32(arg->delay_interval); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n", + ac->wmm_ac, ac->user_priority, ac->service_interval, + ac->suspend_interval, ac->delay_interval); + + return ptr + sizeof(*tlv) + sizeof(*ac); +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], + const struct wmi_sta_uapsd_auto_trig_arg *args, + u32 num_ac) +{ + struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd; + struct wmi_sta_uapsd_auto_trig_param *ac; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len; + size_t ac_tlv_len; + void *ptr; + int i; + + ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac)); + len = sizeof(*tlv) + sizeof(*cmd) + + sizeof(*tlv) + ac_tlv_len; + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->num_ac = __cpu_to_le32(num_ac); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(ac_tlv_len); + ac = (void *)tlv->value; + + ptr += sizeof(*tlv); + for (i = 0; i < num_ac; i++) + ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n"); + return skb; +} + +static void *ath10k_wmi_tlv_put_wmm(void *ptr, + const struct wmi_wmm_params_arg *arg) +{ + struct wmi_wmm_params *wmm; + struct wmi_tlv *tlv; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS); + tlv->len = __cpu_to_le16(sizeof(*wmm)); + wmm = (void *)tlv->value; + ath10k_wmi_set_wmm_param(wmm, arg); + + return ptr + sizeof(*tlv) + sizeof(*wmm); +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, + const struct wmi_wmm_params_all_arg *arg) +{ + struct 
wmi_tlv_vdev_set_wmm_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len; + void *ptr; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + + ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be); + ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk); + ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi); + ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar, + const struct wmi_sta_keepalive_arg *arg) +{ + struct wmi_tlv_sta_keepalive_cmd *cmd; + struct wmi_sta_keepalive_arp_resp *arp; + struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd) + + sizeof(*tlv) + sizeof(*arp); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->enabled = __cpu_to_le32(arg->enabled); + cmd->method = __cpu_to_le32(arg->method); + cmd->interval = __cpu_to_le32(arg->interval); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE); + tlv->len = __cpu_to_le16(sizeof(*arp)); + arp = (void *)tlv->value; + + arp->src_ip4_addr = arg->src_ip4_addr; + arp->dest_ip4_addr = arg->dest_ip4_addr; + ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n", + arg->vdev_id, arg->enabled, arg->method, arg->interval); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], + enum wmi_peer_type peer_type) +{ + struct wmi_tlv_peer_create_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->peer_type = __cpu_to_le32(peer_type); + ether_addr_copy(cmd->peer_addr.addr, peer_addr); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN]) +{ + struct wmi_peer_delete_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n"); + return skb; +} + +static struct 
sk_buff * +ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) +{ + struct wmi_peer_flush_tids_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id, + const u8 *peer_addr, + enum wmi_peer_param param_id, + u32 param_value) +{ + struct wmi_peer_set_param_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->param_id = __cpu_to_le32(param_id); + cmd->param_value = __cpu_to_le32(param_value); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv vdev %d peer %pM set param %d value 0x%x\n", + vdev_id, peer_addr, param_id, param_value); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_tlv_peer_assoc_cmd *cmd; + struct wmi_vht_rate_set *vht_rate; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len, legacy_rate_len, ht_rate_len; + void *ptr; + + if (arg->peer_mpdu_density > 16) + return ERR_PTR(-EINVAL); + if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) + return ERR_PTR(-EINVAL); + if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) + return ERR_PTR(-EINVAL); + + legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates, + sizeof(__le32)); + ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32)); + len = (sizeof(*tlv) + sizeof(*cmd)) + + (sizeof(*tlv) + legacy_rate_len) + + (sizeof(*tlv) + ht_rate_len) + + (sizeof(*tlv) + sizeof(*vht_rate)); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 
0 : 1); + cmd->assoc_id = __cpu_to_le32(arg->peer_aid); + cmd->flags = __cpu_to_le32(arg->peer_flags); + cmd->caps = __cpu_to_le32(arg->peer_caps); + cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval); + cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps); + cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu); + cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density); + cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps); + cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams); + cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps); + cmd->phy_mode = __cpu_to_le32(arg->peer_phymode); + cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates); + cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates); + ether_addr_copy(cmd->mac_addr.addr, arg->addr); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); + tlv->len = __cpu_to_le16(legacy_rate_len); + memcpy(tlv->value, arg->peer_legacy_rates.rates, + arg->peer_legacy_rates.num_rates); + + ptr += sizeof(*tlv); + ptr += legacy_rate_len; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); + tlv->len = __cpu_to_le16(ht_rate_len); + memcpy(tlv->value, arg->peer_ht_rates.rates, + arg->peer_ht_rates.num_rates); + + ptr += sizeof(*tlv); + ptr += ht_rate_len; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET); + tlv->len = __cpu_to_le16(sizeof(*vht_rate)); + vht_rate = (void *)tlv->value; + + vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate); + vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set); + vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); + vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); + + ptr += sizeof(*tlv); + ptr += sizeof(*vht_rate); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_ps_mode psmode) +{ + struct wmi_sta_powersave_mode_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->sta_ps_mode = __cpu_to_le32(psmode); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_powersave_param param_id, + u32 param_value) +{ + struct wmi_sta_powersave_param_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->param_id = __cpu_to_le32(param_id); + cmd->param_value = __cpu_to_le32(param_value); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac, + enum wmi_ap_ps_peer_param param_id, u32 value) +{ + struct wmi_ap_ps_peer_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + if (!mac) + return 
ERR_PTR(-EINVAL); + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->param_id = __cpu_to_le32(param_id); + cmd->param_value = __cpu_to_le32(value); + ether_addr_copy(cmd->peer_macaddr.addr, mac); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar, + const struct wmi_scan_chan_list_arg *arg) +{ + struct wmi_tlv_scan_chan_list_cmd *cmd; + struct wmi_channel *ci; + struct wmi_channel_arg *ch; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t chans_len, len; + int i; + void *ptr, *chans; + + chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci)); + len = (sizeof(*tlv) + sizeof(*cmd)) + + (sizeof(*tlv) + chans_len); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->num_scan_chans = __cpu_to_le32(arg->n_channels); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(chans_len); + chans = (void *)tlv->value; + + for (i = 0; i < arg->n_channels; i++) { + ch = &arg->channels[i]; + + tlv = chans; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL); + tlv->len = __cpu_to_le16(sizeof(*ci)); + ci = (void *)tlv->value; + + ath10k_wmi_put_wmi_channel(ar, ci, ch); + + chans += sizeof(*tlv); + chans += sizeof(*ci); + } + + ptr += sizeof(*tlv); + ptr += chans_len; + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui) +{ + struct wmi_scan_prob_req_oui_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->prob_req_oui = __cpu_to_le32(prob_req_oui); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, + const void *bcn, size_t bcn_len, + u32 bcn_paddr, bool dtim_zero, + bool deliver_cab) + +{ + struct wmi_bcn_tx_ref_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + u16 fc; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + hdr = (struct ieee80211_hdr *)bcn; + fc = le16_to_cpu(hdr->frame_control); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->data_len = __cpu_to_le32(bcn_len); + cmd->data_ptr = __cpu_to_le32(bcn_paddr); + cmd->msdu_id = 0; + cmd->frame_control = __cpu_to_le32(fc); + cmd->flags = 0; + + if (dtim_zero) + cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); + + if (deliver_cab) + cmd->flags |= 
__cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar, + const struct wmi_wmm_params_all_arg *arg) +{ + struct wmi_tlv_pdev_set_wmm_cmd *cmd; + struct wmi_wmm_params *wmm; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len; + void *ptr; + + len = (sizeof(*tlv) + sizeof(*cmd)) + + (4 * (sizeof(*tlv) + sizeof(*wmm))); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + /* nothing to set here */ + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be); + ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk); + ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi); + ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) +{ + struct wmi_request_stats_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->stats_id = __cpu_to_le32(stats_mask); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar, + u32 vdev_id, + enum wmi_peer_stats_info_request_type type, + u8 *addr, + u32 reset) +{ + struct wmi_tlv_request_peer_stats_info *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->request_type = __cpu_to_le32(type); + + if (type == WMI_REQUEST_ONE_PEER_STATS_INFO) + ether_addr_copy(cmd->peer_macaddr.addr, addr); + + cmd->reset_after_request = __cpu_to_le32(reset); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n"); + return skb; +} + +static int +ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar, + struct sk_buff *msdu) +{ + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct ath10k_mgmt_tx_pkt_addr *pkt_addr; + struct ath10k_wmi *wmi = &ar->wmi; + + spin_lock_bh(&ar->data_lock); + pkt_addr = idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id); + spin_unlock_bh(&ar->data_lock); + + kfree(pkt_addr); + + return 0; +} + +static int +ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb, + dma_addr_t paddr) +{ + struct ath10k_wmi *wmi = &ar->wmi; + struct ath10k_mgmt_tx_pkt_addr *pkt_addr; + int ret; + + pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC); + if (!pkt_addr) + return -ENOMEM; + + pkt_addr->vaddr = skb; + pkt_addr->paddr = paddr; + + spin_lock_bh(&ar->data_lock); + ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0, + wmi->mgmt_max_num_pending_tx, GFP_ATOMIC); + spin_unlock_bh(&ar->data_lock); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret); + 
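/* idr_alloc() returns the newly allocated msdu_id (>= 0) or a negative + * errno, so the return value doubles as the error code for the caller. + */ +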
return ret; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, + dma_addr_t paddr) +{ + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct wmi_tlv_mgmt_tx_cmd *cmd; + struct ieee80211_hdr *hdr; + struct ath10k_vif *arvif; + u32 buf_len = msdu->len; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int len, desc_id; + u32 vdev_id; + void *ptr; + + if (!cb->vif) + return ERR_PTR(-EINVAL); + + hdr = (struct ieee80211_hdr *)msdu->data; + arvif = (void *)cb->vif->drv_priv; + vdev_id = arvif->vdev_id; + + if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) && + (!(ieee80211_is_nullfunc(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control))))) + return ERR_PTR(-EINVAL); + + len = sizeof(*cmd) + 2 * sizeof(*tlv); + + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + buf_len += IEEE80211_CCMP_MIC_LEN; + } + + buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN); + buf_len = round_up(buf_len, 4); + + len += buf_len; + len = round_up(len, 4); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr); + if (desc_id < 0) + goto err_free_skb; + + cb->msdu_id = desc_id; + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->desc_id = __cpu_to_le32(desc_id); + cmd->chanfreq = 0; + cmd->buf_len = __cpu_to_le32(buf_len); + cmd->frame_len = __cpu_to_le32(msdu->len); + cmd->paddr = __cpu_to_le64(paddr); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); + tlv->len = __cpu_to_le16(buf_len); + + ptr += sizeof(*tlv); + memcpy(ptr, msdu->data, buf_len); + + return skb; + +err_free_skb: + dev_kfree_skb(skb); + return ERR_PTR(desc_id); +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar, + enum wmi_force_fw_hang_type type, + u32 delay_ms) +{ + struct wmi_force_fw_hang_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->type = __cpu_to_le32(type); + cmd->delay_ms = __cpu_to_le32(delay_ms); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable, + u32 log_level) +{ + struct wmi_tlv_dbglog_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len, bmap_len; + u32 value; + void *ptr; + + if (module_enable) { + value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE( + module_enable, + WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE); + } else { + value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE( + WMI_TLV_DBGLOG_ALL_MODULES, + WMI_TLV_DBGLOG_LOG_LEVEL_WARN); + } + + bmap_len = 0; + len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len; + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD); + tlv->len = 
__cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL); + cmd->value = __cpu_to_le32(value); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); + tlv->len = __cpu_to_le16(bmap_len); + + /* nothing to do here */ + + ptr += sizeof(*tlv); + ptr += sizeof(bmap_len); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter) +{ + struct wmi_tlv_pktlog_enable *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->filter = __cpu_to_le32(filter); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n", + filter); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar) +{ + struct wmi_tlv_pdev_get_temp_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar) +{ + struct wmi_tlv_pktlog_disable *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id, + u32 tim_ie_offset, struct sk_buff *bcn, + u32 prb_caps, u32 prb_erp, void *prb_ies, + size_t prb_ies_len) +{ + struct wmi_tlv_bcn_tmpl_cmd *cmd; + struct wmi_tlv_bcn_prb_info *info; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + if (WARN_ON(prb_ies_len > 0 && !prb_ies)) + return ERR_PTR(-EINVAL); + + len = sizeof(*tlv) + sizeof(*cmd) + + sizeof(*tlv) + sizeof(*info) + prb_ies_len + + sizeof(*tlv) + roundup(bcn->len, 4); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset); + cmd->buf_len = __cpu_to_le32(bcn->len); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + /* FIXME: prb_ies_len should be probably aligned to 4byte boundary but + * then it is then impossible to pass original ie len. 
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
+			       u32 tim_ie_offset, struct sk_buff *bcn,
+			       u32 prb_caps, u32 prb_erp, void *prb_ies,
+			       size_t prb_ies_len)
+{
+	struct wmi_tlv_bcn_tmpl_cmd *cmd;
+	struct wmi_tlv_bcn_prb_info *info;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
+		return ERR_PTR(-EINVAL);
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
+	      sizeof(*tlv) + roundup(bcn->len, 4);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
+	cmd->buf_len = __cpu_to_le32(bcn->len);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	/* FIXME: prb_ies_len should probably be aligned to a 4 byte
+	 * boundary, but then it would be impossible to pass the original
+	 * IE length. This chunk is not used yet, so if setting the probe
+	 * response template causes problems with beaconing or crashes the
+	 * firmware, look here.
+	 */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
+	info = (void *)tlv->value;
+	info->caps = __cpu_to_le32(prb_caps);
+	info->erp = __cpu_to_le32(prb_erp);
+	memcpy(info->ies, prb_ies, prb_ies_len);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*info);
+	ptr += prb_ies_len;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
+	memcpy(tlv->value, bcn->data, bcn->len);
+
+	/* FIXME: Adjust TSF? */
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
+		   vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
+			       struct sk_buff *prb)
+{
+	struct wmi_tlv_prb_tmpl_cmd *cmd;
+	struct wmi_tlv_bcn_prb_info *info;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + sizeof(*info) +
+	      sizeof(*tlv) + roundup(prb->len, 4);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->buf_len = __cpu_to_le32(prb->len);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+	tlv->len = __cpu_to_le16(sizeof(*info));
+	info = (void *)tlv->value;
+	info->caps = 0;
+	info->erp = 0;
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*info);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
+	memcpy(tlv->value, prb->data, prb->len);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
+		   vdev_id);
+	return skb;
+}
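+
+/* p2p_ie points at a raw information element: p2p_ie[1] is the payload
+ * length, so the full element occupies p2p_ie[1] + 2 bytes including
+ * the two byte tag/length header.
+ */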
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
+				    const u8 *p2p_ie)
+{
+	struct wmi_tlv_p2p_go_bcn_ie *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
+	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
+
+	ptr += sizeof(*tlv);
+	ptr += roundup(p2p_ie[1] + 2, 4);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
+		   vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+					   enum wmi_tdls_state state)
+{
+	struct wmi_tdls_set_state_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+	/* Set the options from wmi_tlv_tdls_options; for now none of
+	 * them are enabled by default.
+	 */
+	u32 options = 0;
+
+	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
+		options |= WMI_TLV_TDLS_BUFFER_STA_EN;
+
+	/* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means the firmware
+	 * will handle the TDLS link inactivity detection logic.
+	 */
+	if (state == WMI_TDLS_ENABLE_ACTIVE)
+		state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->state = __cpu_to_le32(state);
+	cmd->notification_interval_ms = __cpu_to_le32(5000);
+	cmd->tx_discovery_threshold = __cpu_to_le32(100);
+	cmd->tx_teardown_threshold = __cpu_to_le32(5);
+	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+	cmd->rssi_delta = __cpu_to_le32(-20);
+	cmd->tdls_options = __cpu_to_le32(options);
+	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+		   state, vdev_id);
+	return skb;
+}
+
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+	u32 peer_qos = 0;
+
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+	return peer_qos;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+				       const struct wmi_tdls_peer_update_cmd_arg *arg,
+				       const struct wmi_tdls_peer_capab_arg *cap,
+				       const struct wmi_channel_arg *chan_arg)
+{
+	struct wmi_tdls_peer_update_cmd *cmd;
+	struct wmi_tdls_peer_capab *peer_cap;
+	struct wmi_channel *chan;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	u32 peer_qos;
+	void *ptr;
+	int len;
+	int i;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + sizeof(*peer_cap) +
+	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+	cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+	peer_cap = (void *)tlv->value;
+	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+						   cap->peer_max_sp);
+	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+	peer_cap->peer_curr_operclass =
__cpu_to_le32(cap->peer_curr_operclass); + peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass); + peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len); + peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len); + + for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++) + peer_cap->peer_operclass[i] = cap->peer_operclass[i]; + + peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder); + peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num); + peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw); + + ptr += sizeof(*tlv); + ptr += sizeof(*peer_cap); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan)); + + ptr += sizeof(*tlv); + + for (i = 0; i < cap->peer_chan_len; i++) { + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL); + tlv->len = __cpu_to_le16(sizeof(*chan)); + chan = (void *)tlv->value; + ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]); + + ptr += sizeof(*tlv); + ptr += sizeof(*chan); + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv tdls peer update vdev %i state %d n_chans %u\n", + arg->vdev_id, arg->peer_state, cap->peer_chan_len); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period, + u32 duration, u32 next_offset, + u32 enabled) +{ + struct wmi_tlv_set_quiet_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + /* vdev_id is not in use, set to 0 */ + cmd->vdev_id = __cpu_to_le32(0); + cmd->period = __cpu_to_le32(period); + cmd->duration = __cpu_to_le32(duration); + cmd->next_start = __cpu_to_le32(next_offset); + cmd->enabled = __cpu_to_le32(enabled); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv quiet param: period %u duration %u enabled %d\n", + period, duration, enabled); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar) +{ + struct wmi_tlv_wow_enable_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (struct wmi_tlv *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + cmd->enable = __cpu_to_le32(1); + if (!ar->bus_param.link_can_suspend) + cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar, + u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable) +{ + struct wmi_tlv_wow_add_del_event_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (struct wmi_tlv *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->is_add = __cpu_to_le32(enable); + cmd->event_bitmap = __cpu_to_le32(1 << event); + + ath10k_dbg(ar, ATH10K_DBG_WMI, 
"wmi tlv wow add wakeup event %s enable %d vdev_id %d\n", + wow_wakeup_event(event), enable, vdev_id); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar) +{ + struct wmi_tlv_wow_host_wakeup_ind *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (struct wmi_tlv *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id, + u32 pattern_id, const u8 *pattern, + const u8 *bitmask, int pattern_len, + int pattern_offset) +{ + struct wmi_tlv_wow_add_pattern_cmd *cmd; + struct wmi_tlv_wow_bitmap_pattern *bitmap; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd) + + sizeof(*tlv) + /* array struct */ + sizeof(*tlv) + sizeof(*bitmap) + /* bitmap */ + sizeof(*tlv) + /* empty ipv4 sync */ + sizeof(*tlv) + /* empty ipv6 sync */ + sizeof(*tlv) + /* empty magic */ + sizeof(*tlv) + /* empty info timeout */ + sizeof(*tlv) + sizeof(u32); /* ratelimit interval */ + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + /* cmd */ + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->pattern_id = __cpu_to_le32(pattern_id); + cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + /* bitmap */ + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap)); + + ptr += sizeof(*tlv); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T); + tlv->len = __cpu_to_le16(sizeof(*bitmap)); + bitmap = (void *)tlv->value; + + memcpy(bitmap->patternbuf, pattern, pattern_len); + memcpy(bitmap->bitmaskbuf, bitmask, pattern_len); + bitmap->pattern_offset = __cpu_to_le32(pattern_offset); + bitmap->pattern_len = __cpu_to_le32(pattern_len); + bitmap->bitmask_len = __cpu_to_le32(pattern_len); + bitmap->pattern_id = __cpu_to_le32(pattern_id); + + ptr += sizeof(*tlv); + ptr += sizeof(*bitmap); + + /* ipv4 sync */ + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(0); + + ptr += sizeof(*tlv); + + /* ipv6 sync */ + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(0); + + ptr += sizeof(*tlv); + + /* magic */ + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(0); + + ptr += sizeof(*tlv); + + /* pattern info timeout */ + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); + tlv->len = __cpu_to_le16(0); + + ptr += sizeof(*tlv); + + /* ratelimit interval */ + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); + tlv->len = __cpu_to_le16(sizeof(u32)); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n", + vdev_id, pattern_id, pattern_offset); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id, + u32 
pattern_id)
+{
+	struct wmi_tlv_wow_del_pattern_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->pattern_id = __cpu_to_le32(pattern_id);
+	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+		   vdev_id, pattern_id);
+	return skb;
+}
+
+/* Request FW to start PNO operation */
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
+				       u32 vdev_id,
+				       struct wmi_pno_scan_req *pno)
+{
+	struct nlo_configured_parameters *nlo_list;
+	struct wmi_tlv_wow_nlo_config_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	__le32 *channel_list;
+	u16 tlv_len;
+	size_t len;
+	void *ptr;
+	u32 i;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) +
+	      /* TLV placeholder for array of structures
+	       * nlo_configured_parameters(nlo_list)
+	       */
+	      sizeof(*tlv);
+	      /* TLV placeholder for array of uint32 channel_list */
+
+	len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
+				   WMI_NLO_MAX_CHAN);
+	len += sizeof(struct nlo_configured_parameters) *
+	       min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	/* wmi_tlv_wow_nlo_config_cmd parameters */
+	cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
+	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
+
+	/* current FW does not support min-max range for dwell time */
+	cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
+	cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);
+
+	if (pno->do_passive_scan)
+		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
+
+	/* copy scan interval */
+	cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
+	cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
+	cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
+	cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);
+
+	if (pno->enable_pno_scan_randomization) {
+		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+					    WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
+		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+	}
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	/* nlo_configured_parameters(nlo_list) */
+	cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
+					       WMI_NLO_MAX_SSIDS));
+	tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
+		  sizeof(struct nlo_configured_parameters);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(tlv_len);
+
+	ptr += sizeof(*tlv);
+	nlo_list = ptr;
+	for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
+		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+		tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
+					 sizeof(*tlv));
+
+		/* copy ssid and its length */
+		nlo_list[i].ssid.valid = __cpu_to_le32(true);
+
nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len; + memcpy(nlo_list[i].ssid.ssid.ssid, + pno->a_networks[i].ssid.ssid, + __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len)); + + /* copy rssi threshold */ + if (pno->a_networks[i].rssi_threshold && + pno->a_networks[i].rssi_threshold > -300) { + nlo_list[i].rssi_cond.valid = __cpu_to_le32(true); + nlo_list[i].rssi_cond.rssi = + __cpu_to_le32(pno->a_networks[i].rssi_threshold); + } + + nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true); + nlo_list[i].bcast_nw_type.bcast_nw_type = + __cpu_to_le32(pno->a_networks[i].bcast_nw_type); + } + + ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters); + + /* copy channel info */ + cmd->num_of_channels = __cpu_to_le32(min_t(u8, + pno->a_networks[0].channel_count, + WMI_NLO_MAX_CHAN)); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); + tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) * + sizeof(u_int32_t)); + ptr += sizeof(*tlv); + + channel_list = (__le32 *)ptr; + for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++) + channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n", + vdev_id); + + return skb; +} + +/* Request FW to stop ongoing PNO operation */ +static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar, + u32 vdev_id) +{ + struct wmi_tlv_wow_nlo_config_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd) + + sizeof(*tlv) + + /* TLV place holder for array of structures + * nlo_configured_parameters(nlo_list) + */ + sizeof(*tlv); + /* TLV place holder for array of uint32 channel_list */ + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + /* nlo_configured_parameters(nlo_list) */ + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); + tlv->len = __cpu_to_le16(0); + + ptr += sizeof(*tlv); + + /* channel list */ + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); + tlv->len = __cpu_to_le16(0); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id, + struct wmi_pno_scan_req *pno_scan) +{ + if (pno_scan->enable) + return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan); + else + return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id); +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable) +{ + struct wmi_tlv_adaptive_qcs *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->enable = __cpu_to_le32(enable ? 
1 : 0); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value) +{ + struct wmi_echo_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->value = cpu_to_le32(value); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value); + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar, + const struct wmi_vdev_spectral_conf_arg *arg) +{ + struct wmi_vdev_spectral_conf_cmd *cmd; + struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->scan_count = __cpu_to_le32(arg->scan_count); + cmd->scan_period = __cpu_to_le32(arg->scan_period); + cmd->scan_priority = __cpu_to_le32(arg->scan_priority); + cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size); + cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena); + cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena); + cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref); + cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay); + cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr); + cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr); + cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode); + cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode); + cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr); + cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format); + cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode); + cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale); + cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj); + cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask); + + return skb; +} + +static struct sk_buff * +ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, + u32 trigger, u32 enable) +{ + struct wmi_vdev_spectral_enable_cmd *cmd; + struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + size_t len; + + len = sizeof(*tlv) + sizeof(*cmd); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->trigger_cmd = __cpu_to_le32(trigger); + cmd->enable_cmd = __cpu_to_le32(enable); + + return skb; +} + +/****************/ +/* TLV mappings */ +/****************/ + +static struct wmi_cmd_map wmi_tlv_cmd_map = { + .init_cmdid = WMI_TLV_INIT_CMDID, + .start_scan_cmdid = WMI_TLV_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = 
WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID, + .scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID, + .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID, + .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID, + .mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD, + .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID, + .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = + WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE, + .ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE, + 
.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID, + .ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID, + .ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID, + .peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID, + .wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = + WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID, + .rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID, + .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID, + .request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID, + .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID, + .network_list_offload_config_cmdid = + WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID, + .gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID, + .csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID, + .csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID, + .chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID, + .peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID, + .peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID, + .sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID, + .sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID, + .sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID, + .echo_cmdid = WMI_TLV_ECHO_CMDID, + .pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID, + .vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID, + .vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID, + .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID, + .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID, + .pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID, + .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID, + .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID, + .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID, + .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + 
.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, +}; + +static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = { + .tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK, + .rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK, + .txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G, + .txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G, + .txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE, + .beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE, + .beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE, + .resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + .protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE, + .dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW, + .non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + .agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH, + .sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH, + .ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING, + .ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE, + .ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE, + .ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK, + .ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI, + .ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO, + .ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + .ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + .ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE, + .ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + .l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE, + .dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE, + .pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + .pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + .pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + .pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + .pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + .vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + .peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + .bcnflt_stats_update_period = + WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + .pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS, + .arp_ac_override = 
WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE, + .dcs = WMI_TLV_PDEV_PARAM_DCS, + .ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE, + .ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD, + .ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD, + .ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL, + .ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL, + .dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN, + .proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA, + .idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG, + .power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP, + .fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED, + .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR, + .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE, + .cal_period = WMI_PDEV_PARAM_UNSUPPORTED, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG, + .rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE, + .peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE, +}; + +static struct wmi_peer_param_map wmi_tlv_peer_param_map = { + .smps_state = WMI_TLV_PEER_SMPS_STATE, + .ampdu = WMI_TLV_PEER_AMPDU, + .authorize = WMI_TLV_PEER_AUTHORIZE, + .chan_width = WMI_TLV_PEER_CHAN_WIDTH, + .nss = WMI_TLV_PEER_NSS, + .use_4addr = WMI_TLV_PEER_USE_4ADDR, + .membership = WMI_TLV_PEER_MEMBERSHIP, + .user_pos = WMI_TLV_PEER_USERPOS, + .crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED, + .tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR, + 
.set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S, + .ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH, + .phymode = WMI_TLV_PEER_PHYMODE, + .use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR, + .dummy_var = WMI_TLV_PEER_DUMMY_VAR, +}; + +static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = { + .rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD, + .fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + .beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL, + .listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL, + .multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE, + .mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE, + .slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME, + .preamble = WMI_TLV_VDEV_PARAM_PREAMBLE, + .swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME, + .wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD, + .wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME, + .wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL, + .dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD, + .wmi_vdev_oc_scheduler_air_time_limit = + WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + .wds = WMI_TLV_VDEV_PARAM_WDS, + .atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW, + .bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX, + .bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT, + .bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT, + .feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM, + .chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH, + .chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET, + .disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION, + .sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT, + .mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE, + .protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE, + .fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE, + .sgi = WMI_TLV_VDEV_PARAM_SGI, + .ldpc = WMI_TLV_VDEV_PARAM_LDPC, + .tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC, + .rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC, + .intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD, + .def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID, + .nss = WMI_TLV_VDEV_PARAM_NSS, + .bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE, + .mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE, + .mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE, + .dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE, + .unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + .ap_keepalive_min_idle_inactive_time_secs = + WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_idle_inactive_time_secs = + WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_unresponsive_time_secs = + WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + .ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS, + .mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED, + .enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS, + .txbf = WMI_TLV_VDEV_PARAM_TXBF, + .packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE, + .drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY, + .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE, + .ap_detect_out_of_sync_sleeping_sta_time_secs = + WMI_TLV_VDEV_PARAM_UNSUPPORTED, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + 
.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, +}; + +static const struct wmi_ops wmi_tlv_ops = { + .rx = ath10k_wmi_tlv_op_rx, + .map_svc = wmi_tlv_svc_map, + .map_svc_ext = wmi_tlv_svc_map_ext, + + .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev, + .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev, + .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev, + .pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev, + .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev, + .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev, + .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev, + .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr, + .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, + .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev, + .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev, + .pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail, + .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats, + .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev, + .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev, + .pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev, + .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme, + + .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend, + .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume, + .gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd, + .gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param, + .gen_init = ath10k_wmi_tlv_op_gen_init, + .gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan, + .gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan, + .gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create, + .gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete, + .gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start, + .gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop, + .gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up, + .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down, + .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param, + .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key, + .gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf, + .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create, + .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete, + .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush, + .gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param, + .gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc, + .gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode, + .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps, + .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps, + .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list, + .gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui, + .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma, + .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm, + .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats, + .gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info, + .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, + /* .gen_mgmt_tx = not implemented; HTT is used */ + .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send, + .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send, + .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, + .gen_pktlog_enable = 
ath10k_wmi_tlv_op_gen_pktlog_enable, + .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, + .gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode, + .gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature, + /* .gen_addba_clear_resp not implemented */ + /* .gen_addba_send not implemented */ + /* .gen_addba_set_resp not implemented */ + /* .gen_delba_send not implemented */ + .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl, + .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl, + .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie, + .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd, + .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive, + .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable, + .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event, + .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind, + .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern, + .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern, + .gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno, + .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state, + .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update, + .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs, + .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype, + .gen_echo = ath10k_wmi_tlv_op_gen_echo, + .gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf, + .gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable, + /* .gen_gpio_config not implemented */ + /* .gen_gpio_output not implemented */ +}; + +static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = { + .auth = WMI_TLV_PEER_AUTH, + .qos = WMI_TLV_PEER_QOS, + .need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY, + .apsd = WMI_TLV_PEER_APSD, + .ht = WMI_TLV_PEER_HT, + .bw40 = WMI_TLV_PEER_40MHZ, + .stbc = WMI_TLV_PEER_STBC, + .ldbc = WMI_TLV_PEER_LDPC, + .dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS, + .static_mimops = WMI_TLV_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_TLV_PEER_SPATIAL_MUX, + .vht = WMI_TLV_PEER_VHT, + .bw80 = WMI_TLV_PEER_80MHZ, + .pmf = WMI_TLV_PEER_PMF, + .bw160 = WMI_TLV_PEER_160MHZ, +}; + +/************/ +/* TLV init */ +/************/ + +void ath10k_wmi_tlv_attach(struct ath10k *ar) +{ + ar->wmi.cmd = &wmi_tlv_cmd_map; + ar->wmi.vdev_param = &wmi_tlv_vdev_param_map; + ar->wmi.pdev_param = &wmi_tlv_pdev_param_map; + ar->wmi.peer_param = &wmi_tlv_peer_param_map; + ar->wmi.ops = &wmi_tlv_ops; + ar->wmi.peer_flags = &wmi_tlv_peer_flags_map; +} diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h new file mode 100644 index 000000000000..8a2f87d0a3a3 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -0,0 +1,2683 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#ifndef _WMI_TLV_H +#define _WMI_TLV_H + +#include <linux/bitops.h> + +#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1) +#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1) +#define WMI_TLV_CMD_UNSUPPORTED 0 +#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0 +#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0 +#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN 64 + +#define WMI_RSRC_CFG_FLAG_TX_ACK_RSSI BIT(18) + +enum wmi_tlv_grp_id { + WMI_TLV_GRP_START = 0x3, + WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START, + WMI_TLV_GRP_PDEV, + WMI_TLV_GRP_VDEV, + WMI_TLV_GRP_PEER, + WMI_TLV_GRP_MGMT, + WMI_TLV_GRP_BA_NEG, + WMI_TLV_GRP_STA_PS, + WMI_TLV_GRP_DFS, + WMI_TLV_GRP_ROAM, + WMI_TLV_GRP_OFL_SCAN, + WMI_TLV_GRP_P2P, + WMI_TLV_GRP_AP_PS, + WMI_TLV_GRP_RATECTL, + WMI_TLV_GRP_PROFILE, + WMI_TLV_GRP_SUSPEND, + WMI_TLV_GRP_BCN_FILTER, + WMI_TLV_GRP_WOW, + WMI_TLV_GRP_RTT, + WMI_TLV_GRP_SPECTRAL, + WMI_TLV_GRP_STATS, + WMI_TLV_GRP_ARP_NS_OFL, + WMI_TLV_GRP_NLO_OFL, + WMI_TLV_GRP_GTK_OFL, + WMI_TLV_GRP_CSA_OFL, + WMI_TLV_GRP_CHATTER, + WMI_TLV_GRP_TID_ADDBA, + WMI_TLV_GRP_MISC, + WMI_TLV_GRP_GPIO, + WMI_TLV_GRP_FWTEST, + WMI_TLV_GRP_TDLS, + WMI_TLV_GRP_RESMGR, + WMI_TLV_GRP_STA_SMPS, + WMI_TLV_GRP_WLAN_HB, + WMI_TLV_GRP_RMC, + WMI_TLV_GRP_MHF_OFL, + WMI_TLV_GRP_LOCATION_SCAN, + WMI_TLV_GRP_OEM, + WMI_TLV_GRP_NAN, + WMI_TLV_GRP_COEX, + WMI_TLV_GRP_OBSS_OFL, + WMI_TLV_GRP_LPI, + WMI_TLV_GRP_EXTSCAN, + WMI_TLV_GRP_DHCP_OFL, + WMI_TLV_GRP_IPA, + WMI_TLV_GRP_MDNS_OFL, + WMI_TLV_GRP_SAP_OFL, +}; + +enum wmi_tlv_cmd_id { + WMI_TLV_INIT_CMDID = 0x1, + WMI_TLV_START_SCAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SCAN), + WMI_TLV_STOP_SCAN_CMDID, + WMI_TLV_SCAN_CHAN_LIST_CMDID, + WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID, + WMI_TLV_SCAN_UPDATE_REQUEST_CMDID, + WMI_TLV_SCAN_PROB_REQ_OUI_CMDID, + WMI_TLV_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PDEV), + WMI_TLV_PDEV_SET_CHANNEL_CMDID, + WMI_TLV_PDEV_SET_PARAM_CMDID, + WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID, + WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID, + WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID, + WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID, + WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID, + WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID, + WMI_TLV_PDEV_SET_QUIET_MODE_CMDID, + WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID, + WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID, + WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID, + WMI_TLV_PDEV_DUMP_CMDID, + WMI_TLV_PDEV_SET_LED_CONFIG_CMDID, + WMI_TLV_PDEV_GET_TEMPERATURE_CMDID, + WMI_TLV_PDEV_SET_LED_FLASHING_CMDID, + WMI_TLV_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_VDEV), + WMI_TLV_VDEV_DELETE_CMDID, + WMI_TLV_VDEV_START_REQUEST_CMDID, + WMI_TLV_VDEV_RESTART_REQUEST_CMDID, + WMI_TLV_VDEV_UP_CMDID, + WMI_TLV_VDEV_STOP_CMDID, + WMI_TLV_VDEV_DOWN_CMDID, + WMI_TLV_VDEV_SET_PARAM_CMDID, + WMI_TLV_VDEV_INSTALL_KEY_CMDID, + WMI_TLV_VDEV_WNM_SLEEPMODE_CMDID, + WMI_TLV_VDEV_WMM_ADDTS_CMDID, + WMI_TLV_VDEV_WMM_DELTS_CMDID, + WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID, + WMI_TLV_VDEV_SET_GTX_PARAMS_CMDID, + WMI_TLV_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID, + WMI_TLV_VDEV_PLMREQ_START_CMDID, + WMI_TLV_VDEV_PLMREQ_STOP_CMDID, + WMI_TLV_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PEER), + WMI_TLV_PEER_DELETE_CMDID, + WMI_TLV_PEER_FLUSH_TIDS_CMDID, + WMI_TLV_PEER_SET_PARAM_CMDID, + WMI_TLV_PEER_ASSOC_CMDID, + WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID, + WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID, + WMI_TLV_PEER_MCAST_GROUP_CMDID, + WMI_TLV_PEER_INFO_REQ_CMDID, + WMI_TLV_PEER_GET_ESTIMATED_LINKSPEED_CMDID, + WMI_TLV_BCN_TX_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MGMT), + WMI_TLV_PDEV_SEND_BCN_CMDID, + WMI_TLV_BCN_TMPL_CMDID, + WMI_TLV_BCN_FILTER_RX_CMDID, + WMI_TLV_PRB_REQ_FILTER_RX_CMDID, + 
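+	/* WMI_TLV_MGMT_TX_SEND_CMD below comes after WMI_TLV_PRB_TMPL_CMDID
+	 * rather than next to WMI_TLV_MGMT_TX_CMDID, presumably because it
+	 * was added later and enumerator values must stay in sync with the
+	 * firmware's command numbering.
+	 */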
WMI_TLV_MGMT_TX_CMDID, + WMI_TLV_PRB_TMPL_CMDID, + WMI_TLV_MGMT_TX_SEND_CMD, + WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG), + WMI_TLV_ADDBA_SEND_CMDID, + WMI_TLV_ADDBA_STATUS_CMDID, + WMI_TLV_DELBA_SEND_CMDID, + WMI_TLV_ADDBA_SET_RESP_CMDID, + WMI_TLV_SEND_SINGLEAMSDU_CMDID, + WMI_TLV_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_PS), + WMI_TLV_STA_POWERSAVE_PARAM_CMDID, + WMI_TLV_STA_MIMO_PS_MODE_CMDID, + WMI_TLV_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_DFS), + WMI_TLV_PDEV_DFS_DISABLE_CMDID, + WMI_TLV_DFS_PHYERR_FILTER_ENA_CMDID, + WMI_TLV_DFS_PHYERR_FILTER_DIS_CMDID, + WMI_TLV_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_TLV_GRP_ROAM), + WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD, + WMI_TLV_ROAM_SCAN_PERIOD, + WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + WMI_TLV_ROAM_AP_PROFILE, + WMI_TLV_ROAM_CHAN_LIST, + WMI_TLV_ROAM_SCAN_CMD, + WMI_TLV_ROAM_SYNCH_COMPLETE, + WMI_TLV_ROAM_SET_RIC_REQUEST_CMDID, + WMI_TLV_ROAM_INVOKE_CMDID, + WMI_TLV_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_TLV_GRP_OFL_SCAN), + WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE, + WMI_TLV_OFL_SCAN_PERIOD, + WMI_TLV_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_TLV_GRP_P2P), + WMI_TLV_P2P_DEV_SET_DISCOVERABILITY, + WMI_TLV_P2P_GO_SET_BEACON_IE, + WMI_TLV_P2P_GO_SET_PROBE_RESP_IE, + WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID, + WMI_TLV_P2P_DISC_OFFLOAD_CONFIG_CMDID, + WMI_TLV_P2P_DISC_OFFLOAD_APPIE_CMDID, + WMI_TLV_P2P_DISC_OFFLOAD_PATTERN_CMDID, + WMI_TLV_P2P_SET_OPPPS_PARAM_CMDID, + WMI_TLV_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_AP_PS), + WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID, + WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RATECTL), + WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PROFILE), + WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + WMI_TLV_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SUSPEND), + WMI_TLV_PDEV_RESUME_CMDID, + WMI_TLV_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BCN_FILTER), + WMI_TLV_RMV_BCN_FILTER_CMDID, + WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WOW), + WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID, + WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + WMI_TLV_WOW_ENABLE_CMDID, + WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + WMI_TLV_WOW_ACER_IOAC_ADD_KEEPALIVE_CMDID, + WMI_TLV_WOW_ACER_IOAC_DEL_KEEPALIVE_CMDID, + WMI_TLV_WOW_ACER_IOAC_ADD_WAKE_PATTERN_CMDID, + WMI_TLV_WOW_ACER_IOAC_DEL_WAKE_PATTERN_CMDID, + WMI_TLV_D0_WOW_ENABLE_DISABLE_CMDID, + WMI_TLV_EXTWOW_ENABLE_CMDID, + WMI_TLV_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID, + WMI_TLV_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID, + WMI_TLV_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RTT), + WMI_TLV_RTT_TSF_CMDID, + WMI_TLV_SPECTRAL_SCAN_CONF_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SPECTRAL), + WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID, + WMI_TLV_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STATS), + WMI_TLV_MCC_SCHED_TRAFFIC_STATS_CMDID, + WMI_TLV_REQUEST_STATS_EXT_CMDID, + WMI_TLV_REQUEST_LINK_STATS_CMDID, + WMI_TLV_START_LINK_STATS_CMDID, + WMI_TLV_CLEAR_LINK_STATS_CMDID, + WMI_TLV_CGET_FW_MEM_DUMP_CMDID, + WMI_TLV_CDEBUG_MESG_FLUSH_CMDID, + WMI_TLV_CDIAG_EVENT_LOG_CONFIG_CMDID, + WMI_TLV_CREQUEST_WLAN_STATS_CMDID, + WMI_TLV_CREQUEST_RCPI_CMDID, + WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID, + WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL), + WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID, + WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID, + WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = + 
WMI_TLV_CMD(WMI_TLV_GRP_NLO_OFL), + WMI_TLV_APFIND_CMDID, + WMI_TLV_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GTK_OFL), + WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CSA_OFL), + WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID, + WMI_TLV_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CHATTER), + WMI_TLV_CHATTER_ADD_COALESCING_FILTER_CMDID, + WMI_TLV_CHATTER_DELETE_COALESCING_FILTER_CMDID, + WMI_TLV_CHATTER_COALESCING_QUERY_CMDID, + WMI_TLV_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TID_ADDBA), + WMI_TLV_PEER_TID_DELBA_CMDID, + WMI_TLV_STA_DTIM_PS_METHOD_CMDID, + WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID, + WMI_TLV_STA_KEEPALIVE_CMDID, + WMI_TLV_BA_REQ_SSN_CMDID, + WMI_TLV_ECHO_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MISC), + WMI_TLV_PDEV_UTF_CMDID, + WMI_TLV_DBGLOG_CFG_CMDID, + WMI_TLV_PDEV_QVIT_CMDID, + WMI_TLV_PDEV_FTM_INTG_CMDID, + WMI_TLV_VDEV_SET_KEEPALIVE_CMDID, + WMI_TLV_VDEV_GET_KEEPALIVE_CMDID, + WMI_TLV_FORCE_FW_HANG_CMDID, + WMI_TLV_SET_MCASTBCAST_FILTER_CMDID, + WMI_TLV_THERMAL_MGMT_CMDID, + WMI_TLV_HOST_AUTO_SHUTDOWN_CFG_CMDID, + WMI_TLV_TPC_CHAINMASK_CONFIG_CMDID, + WMI_TLV_SET_ANTENNA_DIVERSITY_CMDID, + WMI_TLV_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GPIO), + WMI_TLV_GPIO_OUTPUT_CMDID, + WMI_TLV_TXBF_CMDID, + WMI_TLV_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID = + WMI_TLV_CMD(WMI_TLV_GRP_FWTEST), + WMI_TLV_FWTEST_P2P_SET_NOA_PARAM_CMDID, + WMI_TLV_UNIT_TEST_CMDID, + WMI_TLV_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TDLS), + WMI_TLV_TDLS_PEER_UPDATE_CMDID, + WMI_TLV_TDLS_SET_OFFCHAN_MODE_CMDID, + WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RESMGR), + WMI_TLV_RESMGR_SET_CHAN_TIME_QUOTA_CMDID, + WMI_TLV_RESMGR_SET_CHAN_LATENCY_CMDID, + WMI_TLV_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_SMPS), + WMI_TLV_STA_SMPS_PARAM_CMDID, + WMI_TLV_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WLAN_HB), + WMI_TLV_HB_SET_TCP_PARAMS_CMDID, + WMI_TLV_HB_SET_TCP_PKT_FILTER_CMDID, + WMI_TLV_HB_SET_UDP_PARAMS_CMDID, + WMI_TLV_HB_SET_UDP_PKT_FILTER_CMDID, + WMI_TLV_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RMC), + WMI_TLV_RMC_SET_ACTION_PERIOD_CMDID, + WMI_TLV_RMC_CONFIG_CMDID, + WMI_TLV_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MHF_OFL), + WMI_TLV_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID, + WMI_TLV_BATCH_SCAN_ENABLE_CMDID = + WMI_TLV_CMD(WMI_TLV_GRP_LOCATION_SCAN), + WMI_TLV_BATCH_SCAN_DISABLE_CMDID, + WMI_TLV_BATCH_SCAN_TRIGGER_RESULT_CMDID, + WMI_TLV_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OEM), + WMI_TLV_NAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_NAN), + WMI_TLV_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_COEX), + WMI_TLV_CHAN_AVOID_UPDATE_CMDID, + WMI_TLV_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OBSS_OFL), + WMI_TLV_OBSS_SCAN_DISABLE_CMDID, + WMI_TLV_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_LPI), + WMI_TLV_LPI_START_SCAN_CMDID, + WMI_TLV_LPI_STOP_SCAN_CMDID, + WMI_TLV_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_EXTSCAN), + WMI_TLV_EXTSCAN_STOP_CMDID, + WMI_TLV_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID, + WMI_TLV_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID, + WMI_TLV_EXTSCAN_GET_CACHED_RESULTS_CMDID, + WMI_TLV_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID, + WMI_TLV_EXTSCAN_SET_CAPABILITIES_CMDID, + WMI_TLV_EXTSCAN_GET_CAPABILITIES_CMDID, + WMI_TLV_SET_DHCP_SERVER_OFFLOAD_CMDID = + WMI_TLV_CMD(WMI_TLV_GRP_DHCP_OFL), + WMI_TLV_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_IPA), + WMI_TLV_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MDNS_OFL), + WMI_TLV_MDNS_SET_FQDN_CMDID, + WMI_TLV_MDNS_SET_RESPONSE_CMDID, + 
WMI_TLV_MDNS_GET_STATS_CMDID, + WMI_TLV_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SAP_OFL), +}; + +enum wmi_tlv_event_id { + WMI_TLV_SERVICE_READY_EVENTID = 0x1, + WMI_TLV_READY_EVENTID, + WMI_TLV_SERVICE_AVAILABLE_EVENTID, + WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN), + WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV), + WMI_TLV_CHAN_INFO_EVENTID, + WMI_TLV_PHYERR_EVENTID, + WMI_TLV_PDEV_DUMP_EVENTID, + WMI_TLV_TX_PAUSE_EVENTID, + WMI_TLV_DFS_RADAR_EVENTID, + WMI_TLV_PDEV_L1SS_TRACK_EVENTID, + WMI_TLV_PDEV_TEMPERATURE_EVENTID, + WMI_TLV_VDEV_START_RESP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_VDEV), + WMI_TLV_VDEV_STOPPED_EVENTID, + WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID, + WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID, + WMI_TLV_VDEV_TSF_REPORT_EVENTID, + WMI_TLV_VDEV_DELETE_RESP_EVENTID, + WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER), + WMI_TLV_PEER_INFO_EVENTID, + WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID, + WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID, + WMI_TLV_PEER_STATE_EVENTID, + WMI_TLV_PEER_ASSOC_CONF_EVENTID, + WMI_TLV_PEER_DELETE_RESP_EVENTID, + WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT), + WMI_TLV_HOST_SWBA_EVENTID, + WMI_TLV_TBTTOFFSET_UPDATE_EVENTID, + WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID, + WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID, + WMI_TLV_MGMT_TX_COMPLETION_EVENTID, + WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID, + WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG), + WMI_TLV_TX_ADDBA_COMPLETE_EVENTID, + WMI_TLV_BA_RSP_SSN_EVENTID, + WMI_TLV_AGGR_STATE_TRIG_EVENTID, + WMI_TLV_ROAM_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_ROAM), + WMI_TLV_PROFILE_MATCH, + WMI_TLV_ROAM_SYNCH_EVENTID, + WMI_TLV_P2P_DISC_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_P2P), + WMI_TLV_P2P_NOA_EVENTID, + WMI_TLV_PDEV_RESUME_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SUSPEND), + WMI_TLV_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_WOW), + WMI_TLV_D0_WOW_DISABLE_ACK_EVENTID, + WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_RTT), + WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID, + WMI_TLV_RTT_ERROR_REPORT_EVENTID, + WMI_TLV_STATS_EXT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_STATS), + WMI_TLV_IFACE_LINK_STATS_EVENTID, + WMI_TLV_PEER_LINK_STATS_EVENTID, + WMI_TLV_RADIO_LINK_STATS_EVENTID, + WMI_TLV_UPDATE_FW_MEM_DUMP_EVENTID, + WMI_TLV_DIAG_EVENT_LOG_SUPPORTED_EVENTID, + WMI_TLV_INST_RSSI_STATS_EVENTID, + WMI_TLV_RADIO_TX_POWER_LEVEL_STATS_EVENTID, + WMI_TLV_REPORT_STATS_EVENTID, + WMI_TLV_UPDATE_RCPI_EVENTID, + WMI_TLV_PEER_STATS_INFO_EVENTID, + WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL), + WMI_TLV_NLO_SCAN_COMPLETE_EVENTID, + WMI_TLV_APFIND_EVENTID, + WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GTK_OFL), + WMI_TLV_GTK_REKEY_FAIL_EVENTID, + WMI_TLV_CSA_HANDLING_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CSA_OFL), + WMI_TLV_CHATTER_PC_QUERY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CHATTER), + WMI_TLV_ECHO_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MISC), + WMI_TLV_PDEV_UTF_EVENTID, + WMI_TLV_DEBUG_MESG_EVENTID, + WMI_TLV_UPDATE_STATS_EVENTID, + WMI_TLV_DEBUG_PRINT_EVENTID, + WMI_TLV_DCS_INTERFERENCE_EVENTID, + WMI_TLV_PDEV_QVIT_EVENTID, + WMI_TLV_WLAN_PROFILE_DATA_EVENTID, + WMI_TLV_PDEV_FTM_INTG_EVENTID, + WMI_TLV_WLAN_FREQ_AVOID_EVENTID, + WMI_TLV_VDEV_GET_KEEPALIVE_EVENTID, + WMI_TLV_THERMAL_MGMT_EVENTID, + WMI_TLV_DIAG_DATA_CONTAINER_EVENTID, + WMI_TLV_HOST_AUTO_SHUTDOWN_EVENTID, + WMI_TLV_UPDATE_WHAL_MIB_STATS_EVENTID, + WMI_TLV_UPDATE_VDEV_RATE_STATS_EVENTID, + WMI_TLV_DIAG_EVENTID, + WMI_TLV_GPIO_INPUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GPIO), + 
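/*
 * Event IDs are grouped the same way as the command IDs above, via the
 * WMI_TLV_EV(grp) helper, so a group's commands and events occupy
 * parallel ID spaces.
 */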
WMI_TLV_UPLOADH_EVENTID, + WMI_TLV_CAPTUREH_EVENTID, + WMI_TLV_RFKILL_STATE_CHANGE_EVENTID, + WMI_TLV_TDLS_PEER_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_TDLS), + WMI_TLV_BATCH_SCAN_ENABLED_EVENTID = + WMI_TLV_EV(WMI_TLV_GRP_LOCATION_SCAN), + WMI_TLV_BATCH_SCAN_RESULT_EVENTID, + WMI_TLV_OEM_CAPABILITY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_OEM), + WMI_TLV_OEM_MEASUREMENT_REPORT_EVENTID, + WMI_TLV_OEM_ERROR_REPORT_EVENTID, + WMI_TLV_NAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NAN), + WMI_TLV_LPI_RESULT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_LPI), + WMI_TLV_LPI_STATUS_EVENTID, + WMI_TLV_LPI_HANDOFF_EVENTID, + WMI_TLV_EXTSCAN_START_STOP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_EXTSCAN), + WMI_TLV_EXTSCAN_OPERATION_EVENTID, + WMI_TLV_EXTSCAN_TABLE_USAGE_EVENTID, + WMI_TLV_EXTSCAN_CACHED_RESULTS_EVENTID, + WMI_TLV_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID, + WMI_TLV_EXTSCAN_HOTLIST_MATCH_EVENTID, + WMI_TLV_EXTSCAN_CAPABILITIES_EVENTID, + WMI_TLV_MDNS_STATS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MDNS_OFL), + WMI_TLV_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SAP_OFL), + WMI_TLV_SAP_OFL_DEL_STA_EVENTID, +}; + +enum wmi_tlv_pdev_param { + WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK = 0x1, + WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK, + WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G, + WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G, + WMI_TLV_PDEV_PARAM_TXPOWER_SCALE, + WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE, + WMI_TLV_PDEV_PARAM_BEACON_TX_MODE, + WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + WMI_TLV_PDEV_PARAM_PROTECTION_MODE, + WMI_TLV_PDEV_PARAM_DYNAMIC_BW, + WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH, + WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH, + WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING, + WMI_TLV_PDEV_PARAM_LTR_ENABLE, + WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE, + WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK, + WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI, + WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO, + WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE, + WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + WMI_TLV_PDEV_PARAM_L1SS_ENABLE, + WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE, + WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK, + WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + WMI_TLV_PDEV_PARAM_PMF_QOS, + WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE, + WMI_TLV_PDEV_PARAM_DCS, + WMI_TLV_PDEV_PARAM_ANI_ENABLE, + WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD, + WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD, + WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL, + WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL, + WMI_TLV_PDEV_PARAM_DYNTXCHAIN, + WMI_TLV_PDEV_PARAM_PROXY_STA, + WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG, + WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP, + WMI_TLV_PDEV_PARAM_RFKILL_ENABLE, + WMI_TLV_PDEV_PARAM_BURST_DUR, + WMI_TLV_PDEV_PARAM_BURST_ENABLE, + WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG, + WMI_TLV_PDEV_PARAM_LOW_POWER_RF_ENABLE, + WMI_TLV_PDEV_PARAM_L1SS_TRACK, + WMI_TLV_PDEV_PARAM_HYST_EN, + WMI_TLV_PDEV_PARAM_POWER_COLLAPSE_ENABLE, + WMI_TLV_PDEV_PARAM_LED_SYS_STATE, + WMI_TLV_PDEV_PARAM_LED_ENABLE, + WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY, + WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE, + WMI_TLV_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE, + WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD, + WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE, + WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR, + WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE = 0x8b, + 
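/*
 * The jump to an explicit 0x8b above apparently skips a range of pdev
 * parameters this driver does not define; pinning the value keeps the
 * host enum aligned with the firmware's parameter numbering (the
 * following TXPOWER_REASON_MAX therefore lands on 0x8c).
 */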
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX, +}; + +enum wmi_tlv_vdev_param { + WMI_TLV_VDEV_PARAM_RTS_THRESHOLD = 0x1, + WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + WMI_TLV_VDEV_PARAM_BEACON_INTERVAL, + WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL, + WMI_TLV_VDEV_PARAM_MULTICAST_RATE, + WMI_TLV_VDEV_PARAM_MGMT_TX_RATE, + WMI_TLV_VDEV_PARAM_SLOT_TIME, + WMI_TLV_VDEV_PARAM_PREAMBLE, + WMI_TLV_VDEV_PARAM_SWBA_TIME, + WMI_TLV_VDEV_STATS_UPDATE_PERIOD, + WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME, + WMI_TLV_VDEV_HOST_SWBA_INTERVAL, + WMI_TLV_VDEV_PARAM_DTIM_PERIOD, + WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + WMI_TLV_VDEV_PARAM_WDS, + WMI_TLV_VDEV_PARAM_ATIM_WINDOW, + WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX, + WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT, + WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT, + WMI_TLV_VDEV_PARAM_FEATURE_WMM, + WMI_TLV_VDEV_PARAM_CHWIDTH, + WMI_TLV_VDEV_PARAM_CHEXTOFFSET, + WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION, + WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT, + WMI_TLV_VDEV_PARAM_MGMT_RATE, + WMI_TLV_VDEV_PARAM_PROTECTION_MODE, + WMI_TLV_VDEV_PARAM_FIXED_RATE, + WMI_TLV_VDEV_PARAM_SGI, + WMI_TLV_VDEV_PARAM_LDPC, + WMI_TLV_VDEV_PARAM_TX_STBC, + WMI_TLV_VDEV_PARAM_RX_STBC, + WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD, + WMI_TLV_VDEV_PARAM_DEF_KEYID, + WMI_TLV_VDEV_PARAM_NSS, + WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE, + WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE, + WMI_TLV_VDEV_PARAM_MCAST_INDICATE, + WMI_TLV_VDEV_PARAM_DHCP_INDICATE, + WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS, + WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS, + WMI_TLV_VDEV_PARAM_TXBF, + WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE, + WMI_TLV_VDEV_PARAM_DROP_UNENCRY, + WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE, + WMI_TLV_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE, + WMI_TLV_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM, + WMI_TLV_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE, + WMI_TLV_VDEV_PARAM_EARLY_RX_SLOP_STEP, + WMI_TLV_VDEV_PARAM_EARLY_RX_INIT_SLOP, + WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE, + WMI_TLV_VDEV_PARAM_TX_PWRLIMIT, + WMI_TLV_VDEV_PARAM_SNR_NUM_FOR_CAL, + WMI_TLV_VDEV_PARAM_ROAM_FW_OFFLOAD, + WMI_TLV_VDEV_PARAM_ENABLE_RMC, + WMI_TLV_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS, + WMI_TLV_VDEV_PARAM_MAX_RATE, + WMI_TLV_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE, + WMI_TLV_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR, + WMI_TLV_VDEV_PARAM_EBT_RESYNC_TIMEOUT, + WMI_TLV_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE, + WMI_TLV_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED, + WMI_TLV_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED, + WMI_TLV_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED, + WMI_TLV_VDEV_PARAM_INACTIVITY_CNT, + WMI_TLV_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS, + WMI_TLV_VDEV_PARAM_DTIM_POLICY, + WMI_TLV_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS, + WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE, +}; + +enum wmi_tlv_peer_param { + WMI_TLV_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */ + WMI_TLV_PEER_AMPDU = 0x2, + WMI_TLV_PEER_AUTHORIZE = 0x3, + WMI_TLV_PEER_CHAN_WIDTH = 0x4, + WMI_TLV_PEER_NSS = 0x5, + WMI_TLV_PEER_USE_4ADDR = 0x6, + WMI_TLV_PEER_MEMBERSHIP = 0x7, + WMI_TLV_PEER_USERPOS = 0x8, + WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED = 0x9, + WMI_TLV_PEER_TX_FAIL_CNT_THR = 0xa, + WMI_TLV_PEER_SET_HW_RETRY_CTS2S = 0xb, + WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH = 0xc, + WMI_TLV_PEER_PHYMODE = 0xd, + WMI_TLV_PEER_USE_FIXED_PWR = 0xe, + WMI_TLV_PEER_DUMMY_VAR = 0xff, +}; + +enum 
wmi_tlv_peer_flags { + WMI_TLV_PEER_AUTH = 0x00000001, + WMI_TLV_PEER_QOS = 0x00000002, + WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_TLV_PEER_APSD = 0x00000800, + WMI_TLV_PEER_HT = 0x00001000, + WMI_TLV_PEER_40MHZ = 0x00002000, + WMI_TLV_PEER_STBC = 0x00008000, + WMI_TLV_PEER_LDPC = 0x00010000, + WMI_TLV_PEER_DYN_MIMOPS = 0x00020000, + WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000, + WMI_TLV_PEER_SPATIAL_MUX = 0x00200000, + WMI_TLV_PEER_VHT = 0x02000000, + WMI_TLV_PEER_80MHZ = 0x04000000, + WMI_TLV_PEER_PMF = 0x08000000, + WMI_TLV_PEER_160MHZ = 0x20000000, +}; + +enum wmi_tlv_tag { + WMI_TLV_TAG_LAST_RESERVED = 15, + + WMI_TLV_TAG_FIRST_ARRAY_ENUM, + WMI_TLV_TAG_ARRAY_UINT32 = WMI_TLV_TAG_FIRST_ARRAY_ENUM, + WMI_TLV_TAG_ARRAY_BYTE, + WMI_TLV_TAG_ARRAY_STRUCT, + WMI_TLV_TAG_ARRAY_FIXED_STRUCT, + WMI_TLV_TAG_LAST_ARRAY_ENUM = 31, + + WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT, + WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES, + WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ, + WMI_TLV_TAG_STRUCT_READY_EVENT, + WMI_TLV_TAG_STRUCT_SCAN_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_TPC_CONFIG_EVENT, + WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT, + WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR, + WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_STOPPED_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT, + WMI_TLV_TAG_STRUCT_MGMT_RX_HDR, + WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EVENT, + WMI_TLV_TAG_STRUCT_TX_DELBA_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_TX_ADDBA_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_EVENT, + WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO, + WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO_SECTION_BITMAP, + WMI_TLV_TAG_STRUCT_RTT_EVENT_HEADER, + WMI_TLV_TAG_STRUCT_RTT_ERROR_REPORT_EVENT, + WMI_TLV_TAG_STRUCT_RTT_MEAS_EVENT, + WMI_TLV_TAG_STRUCT_ECHO_EVENT, + WMI_TLV_TAG_STRUCT_FTM_INTG_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_EVENT, + WMI_TLV_TAG_STRUCT_GPIO_INPUT_EVENT, + WMI_TLV_TAG_STRUCT_CSA_EVENT, + WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_IGTK_INFO, + WMI_TLV_TAG_STRUCT_DCS_INTERFERENCE_EVENT, + WMI_TLV_TAG_STRUCT_ATH_DCS_CW_INT, + WMI_TLV_TAG_STRUCT_ATH_DCS_WLAN_INT_STAT, + WMI_TLV_TAG_STRUCT_WLAN_PROFILE_CTX_T, + WMI_TLV_TAG_STRUCT_WLAN_PROFILE_T, + WMI_TLV_TAG_STRUCT_PDEV_QVIT_EVENT, + WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT, + WMI_TLV_TAG_STRUCT_TIM_INFO, + WMI_TLV_TAG_STRUCT_P2P_NOA_INFO, + WMI_TLV_TAG_STRUCT_STATS_EVENT, + WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGES_EVENT, + WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGE_DESC, + WMI_TLV_TAG_STRUCT_GTK_REKEY_FAIL_EVENT, + WMI_TLV_TAG_STRUCT_INIT_CMD, + WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG, + WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK, + WMI_TLV_TAG_STRUCT_START_SCAN_CMD, + WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD, + WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD, + WMI_TLV_TAG_STRUCT_CHANNEL, + WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD, + WMI_TLV_TAG_STRUCT_WMM_PARAMS, + WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD, + WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD, + WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD, + WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD, + WMI_TLV_TAG_STRUCT_P2P_NOA_DESCRIPTOR, + WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE, + WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD, + WMI_TLV_TAG_STRUCT_VDEV_UP_CMD, + WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD, + WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD, + WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD, + WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD, + WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD, + WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD, + 
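/*
 * As with the command and event IDs, the tag values in this enum are
 * purely positional: each entry names the C struct that a TLV carrying
 * this tag contains, and the ordering is part of the firmware ABI, so
 * entries may only be appended (the list is closed by WMI_TLV_TAG_MAX
 * further down).
 */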
WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD, + WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD, + WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD, + WMI_TLV_TAG_STRUCT_VHT_RATE_SET, + WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD, + WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD, + WMI_TLV_TAG_STRUCT_BCN_PRB_INFO, + WMI_TLV_TAG_STRUCT_PEER_TID_ADDBA_CMD, + WMI_TLV_TAG_STRUCT_PEER_TID_DELBA_CMD, + WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD, + WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD, + WMI_TLV_TAG_STRUCT_STA_DTIM_PS_METHOD_CMD, + WMI_TLV_TAG_STRUCT_ROAM_SCAN_MODE, + WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_THRESHOLD, + WMI_TLV_TAG_STRUCT_ROAM_SCAN_PERIOD, + WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD, + WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD, + WMI_TLV_TAG_STRUCT_ADD_BCN_FILTER_CMD, + WMI_TLV_TAG_STRUCT_RMV_BCN_FILTER_CMD, + WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD, + WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD, + WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM, + WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD, + WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE, + WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE, + WMI_TLV_TAG_STRUCT_FTM_INTG_CMD, + WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD, + WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE, + WMI_TLV_TAG_STRUCT_P2P_SET_VENDOR_IE_DATA_CMD, + WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD, + WMI_TLV_TAG_STRUCT_PEER_RATE_RETRY_SCHED_CMD, + WMI_TLV_TAG_STRUCT_WLAN_PROFILE_TRIGGER_CMD, + WMI_TLV_TAG_STRUCT_WLAN_PROFILE_SET_HIST_INTVL_CMD, + WMI_TLV_TAG_STRUCT_WLAN_PROFILE_GET_PROF_DATA_CMD, + WMI_TLV_TAG_STRUCT_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD, + WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD, + WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD, + WMI_TLV_TAG_STRUCT_RTT_MEASREQ_HEAD, + WMI_TLV_TAG_STRUCT_RTT_MEASREQ_BODY, + WMI_TLV_TAG_STRUCT_RTT_TSF_CMD, + WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD, + WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD, + WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_NLO_CONFIGURED_PARAMETERS, + WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_CHANSWITCH_CMD, + WMI_TLV_TAG_STRUCT_CHATTER_SET_MODE_CMD, + WMI_TLV_TAG_STRUCT_ECHO_CMD, + WMI_TLV_TAG_STRUCT_VDEV_SET_KEEPALIVE_CMD, + WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_CMD, + WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD, + WMI_TLV_TAG_STRUCT_GPIO_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_GPIO_OUTPUT_CMD, + WMI_TLV_TAG_STRUCT_PEER_ADD_WDS_ENTRY_CMD, + WMI_TLV_TAG_STRUCT_PEER_REMOVE_WDS_ENTRY_CMD, + WMI_TLV_TAG_STRUCT_BCN_TX_HDR, + WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD, + WMI_TLV_TAG_STRUCT_MGMT_TX_HDR, + WMI_TLV_TAG_STRUCT_ADDBA_CLEAR_RESP_CMD, + WMI_TLV_TAG_STRUCT_ADDBA_SEND_CMD, + WMI_TLV_TAG_STRUCT_DELBA_SEND_CMD, + WMI_TLV_TAG_STRUCT_ADDBA_SETRESPONSE_CMD, + WMI_TLV_TAG_STRUCT_SEND_SINGLEAMSDU_CMD, + WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_HT_IE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_VHT_IE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_DSCP_TID_MAP_CMD, + WMI_TLV_TAG_STRUCT_PDEV_GREEN_AP_PS_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD, + WMI_TLV_TAG_STRUCT_PEER_MCAST_GROUP_CMD, + WMI_TLV_TAG_STRUCT_ROAM_AP_PROFILE, + WMI_TLV_TAG_STRUCT_AP_PROFILE, + WMI_TLV_TAG_STRUCT_SCAN_SCH_PRIORITY_TABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_DFS_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_DFS_DISABLE_CMD, + WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD, + WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T, + WMI_TLV_TAG_STRUCT_WOW_IPV4_SYNC_PATTERN_T, + 
WMI_TLV_TAG_STRUCT_WOW_IPV6_SYNC_PATTERN_T, + WMI_TLV_TAG_STRUCT_WOW_MAGIC_PATTERN_CMD, + WMI_TLV_TAG_STRUCT_SCAN_UPDATE_REQUEST_CMD, + WMI_TLV_TAG_STRUCT_CHATTER_PKT_COALESCING_FILTER, + WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_ADD_FILTER_CMD, + WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_DELETE_FILTER_CMD, + WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_QUERY_CMD, + WMI_TLV_TAG_STRUCT_TXBF_CMD, + WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_NLO_EVENT, + WMI_TLV_TAG_STRUCT_CHATTER_QUERY_REPLY_EVENT, + WMI_TLV_TAG_STRUCT_UPLOAD_H_HDR, + WMI_TLV_TAG_STRUCT_CAPTURE_H_EVENT_HDR, + WMI_TLV_TAG_STRUCT_VDEV_WNM_SLEEPMODE_CMD, + WMI_TLV_TAG_STRUCT_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD, + WMI_TLV_TAG_STRUCT_VDEV_WMM_ADDTS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_WMM_DELTS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD, + WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD, + WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD, + WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT, + WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES, + WMI_TLV_TAG_STRUCT_VDEV_MCC_SET_TBTT_MODE_CMD, + WMI_TLV_TAG_STRUCT_ROAM_CHAN_LIST, + WMI_TLV_TAG_STRUCT_VDEV_MCC_BCN_INTVL_CHANGE_EVENT, + WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD, + WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_TIME_QUOTA_CMD, + WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_LATENCY_CMD, + WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD, + WMI_TLV_TAG_STRUCT_BA_RSP_SSN_EVENT, + WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_CMD, + WMI_TLV_TAG_STRUCT_SET_MCASTBCAST_FILTER_CMD, + WMI_TLV_TAG_STRUCT_P2P_SET_OPPPS_CMD, + WMI_TLV_TAG_STRUCT_P2P_SET_NOA_CMD, + WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM, + WMI_TLV_TAG_STRUCT_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM, + WMI_TLV_TAG_STRUCT_STA_SMPS_PARAM_CMD, + WMI_TLV_TAG_STRUCT_VDEV_SET_GTX_PARAMS_CMD, + WMI_TLV_TAG_STRUCT_MCC_SCHED_TRAFFIC_STATS_CMD, + WMI_TLV_TAG_STRUCT_MCC_SCHED_STA_TRAFFIC_STATS, + WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT, + WMI_TLV_TAG_STRUCT_HB_SET_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_HB_SET_TCP_PARAMS_CMD, + WMI_TLV_TAG_STRUCT_HB_SET_TCP_PKT_FILTER_CMD, + WMI_TLV_TAG_STRUCT_HB_SET_UDP_PARAMS_CMD, + WMI_TLV_TAG_STRUCT_HB_SET_UDP_PKT_FILTER_CMD, + WMI_TLV_TAG_STRUCT_HB_IND_EVENT, + WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT, + WMI_TLV_TAG_STRUCT_RFKILL_EVENT, + WMI_TLV_TAG_STRUCT_DFS_RADAR_EVENT, + WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_ENA_CMD, + WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_DIS_CMD, + WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_SCAN_LIST, + WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_NETWORK_INFO, + WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_BATCH_SCAN_DISABLE_CMD, + WMI_TLV_TAG_STRUCT_BATCH_SCAN_TRIGGER_RESULT_CMD, + WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLED_EVENT, + WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_START_CMD, + WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_STOP_CMD, + WMI_TLV_TAG_STRUCT_THERMAL_MGMT_CMD, + WMI_TLV_TAG_STRUCT_THERMAL_MGMT_EVENT, + WMI_TLV_TAG_STRUCT_PEER_INFO_REQ_CMD, + WMI_TLV_TAG_STRUCT_PEER_INFO_EVENT, + WMI_TLV_TAG_STRUCT_PEER_INFO, + WMI_TLV_TAG_STRUCT_PEER_TX_FAIL_CNT_THR_EVENT, + WMI_TLV_TAG_STRUCT_RMC_SET_MODE_CMD, + WMI_TLV_TAG_STRUCT_RMC_SET_ACTION_PERIOD_CMD, + WMI_TLV_TAG_STRUCT_RMC_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_SET_MODE_CMD, + WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD, + WMI_TLV_TAG_STRUCT_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD, + WMI_TLV_TAG_STRUCT_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD, + WMI_TLV_TAG_STRUCT_NAN_CMD_PARAM, + WMI_TLV_TAG_STRUCT_NAN_EVENT_HDR, + WMI_TLV_TAG_STRUCT_PDEV_L1SS_TRACK_EVENT, + WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT, + 
WMI_TLV_TAG_STRUCT_MODEM_POWER_STATE_CMD_PARAM, + WMI_TLV_TAG_STRUCT_PEER_GET_ESTIMATED_LINKSPEED_CMD, + WMI_TLV_TAG_STRUCT_PEER_ESTIMATED_LINKSPEED_EVENT, + WMI_TLV_TAG_STRUCT_AGGR_STATE_TRIG_EVENT, + WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_ROUTING_TABLE_ENTRY, + WMI_TLV_TAG_STRUCT_ROAM_SCAN_CMD, + WMI_TLV_TAG_STRUCT_REQ_STATS_EXT_CMD, + WMI_TLV_TAG_STRUCT_STATS_EXT_EVENT, + WMI_TLV_TAG_STRUCT_OBSS_SCAN_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_OBSS_SCAN_DISABLE_CMD, + WMI_TLV_TAG_STRUCT_OFFLOAD_PRB_RSP_TX_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_LED_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_CFG_CMD, + WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_EVENT, + WMI_TLV_TAG_STRUCT_UPDATE_WHAL_MIB_STATS_EVENT, + WMI_TLV_TAG_STRUCT_CHAN_AVOID_UPDATE_CMD_PARAM, + WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_PKT_PATTERN_T, + WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_TMR_PATTERN_T, + WMI_TLV_TAG_STRUCT_WOW_IOAC_ADD_KEEPALIVE_CMD, + WMI_TLV_TAG_STRUCT_WOW_IOAC_DEL_KEEPALIVE_CMD, + WMI_TLV_TAG_STRUCT_WOW_IOAC_KEEPALIVE_T, + WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_ADD_PATTERN_CMD, + WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_DEL_PATTERN_CMD, + WMI_TLV_TAG_STRUCT_START_LINK_STATS_CMD, + WMI_TLV_TAG_STRUCT_CLEAR_LINK_STATS_CMD, + WMI_TLV_TAG_STRUCT_REQUEST_LINK_STATS_CMD, + WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS_EVENT, + WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS_EVENT, + WMI_TLV_TAG_STRUCT_PEER_STATS_EVENT, + WMI_TLV_TAG_STRUCT_CHANNEL_STATS, + WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS, + WMI_TLV_TAG_STRUCT_RATE_STATS, + WMI_TLV_TAG_STRUCT_PEER_LINK_STATS, + WMI_TLV_TAG_STRUCT_WMM_AC_STATS, + WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS, + WMI_TLV_TAG_STRUCT_LPI_MGMT_SNOOPING_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_LPI_START_SCAN_CMD, + WMI_TLV_TAG_STRUCT_LPI_STOP_SCAN_CMD, + WMI_TLV_TAG_STRUCT_LPI_RESULT_EVENT, + WMI_TLV_TAG_STRUCT_PEER_STATE_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CHANNEL_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_START_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_STOP_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CACHED_RESULTS_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_SET_CAPABILITIES_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CAPABILITIES_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_OPERATION_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_START_STOP_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_TABLE_USAGE_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_DESCRIPTOR_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_RSSI_INFO_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_CACHED_RESULTS_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MATCH_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_CAPABILITIES_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_CACHE_CAPABILITIES_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT, + WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT, + WMI_TLV_TAG_STRUCT_D0_WOW_ENABLE_DISABLE_CMD, + WMI_TLV_TAG_STRUCT_D0_WOW_DISABLE_ACK_EVENT, + WMI_TLV_TAG_STRUCT_UNIT_TEST_CMD, + WMI_TLV_TAG_STRUCT_ROAM_OFFLOAD_TLV_PARAM, + WMI_TLV_TAG_STRUCT_ROAM_11I_OFFLOAD_TLV_PARAM, + WMI_TLV_TAG_STRUCT_ROAM_11R_OFFLOAD_TLV_PARAM, + WMI_TLV_TAG_STRUCT_ROAM_ESE_OFFLOAD_TLV_PARAM, + WMI_TLV_TAG_STRUCT_ROAM_SYNCH_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_SYNCH_COMPLETE, + WMI_TLV_TAG_STRUCT_EXTWOW_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE1_PARAMS_CMD, + 
WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE2_PARAMS_CMD, + WMI_TLV_TAG_STRUCT_LPI_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_LPI_HANDOFF_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_RATE_STATS_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_RATE_HT_INFO, + WMI_TLV_TAG_STRUCT_RIC_REQUEST, + WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_TEMPERATURE_EVENT, + WMI_TLV_TAG_STRUCT_SET_DHCP_SERVER_OFFLOAD_CMD, + WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_RIC_TSPEC, + WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG, + WMI_TLV_TAG_STRUCT_IPA_OFFLOAD_CMD, + WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD, + WMI_TLV_TAG_STRUCT_KEY_MATERIAL, + WMI_TLV_TAG_STRUCT_TDLS_SET_OFFCHAN_MODE_CMD, + WMI_TLV_TAG_STRUCT_SET_LED_FLASHING_CMD, + WMI_TLV_TAG_STRUCT_MDNS_OFFLOAD_CMD, + WMI_TLV_TAG_STRUCT_MDNS_SET_FQDN_CMD, + WMI_TLV_TAG_STRUCT_MDNS_SET_RESP_CMD, + WMI_TLV_TAG_STRUCT_MDNS_GET_STATS_CMD, + WMI_TLV_TAG_STRUCT_MDNS_STATS_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_INVOKE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_RESUME_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_DIVERSITY_CMD, + WMI_TLV_TAG_STRUCT_SAP_OFL_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_SAP_OFL_ADD_STA_EVENT, + WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT, + WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM, + WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR, + WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_CMD, + WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_EVENT, + WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_RESP_EVENT, + WMI_TLV_TAG_STRUCT_OCB_SET_UTC_TIME_CMD, + WMI_TLV_TAG_STRUCT_OCB_START_TIMING_ADVERT_CMD, + WMI_TLV_TAG_STRUCT_OCB_STOP_TIMING_ADVERT_CMD, + WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_CMD, + WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_RESP_EVENT, + WMI_TLV_TAG_STRUCT_DCC_GET_STATS_CMD, + WMI_TLV_TAG_STRUCT_DCC_CHANNEL_STATS_REQUEST, + WMI_TLV_TAG_STRUCT_DCC_GET_STATS_RESP_EVENT, + WMI_TLV_TAG_STRUCT_DCC_CLEAR_STATS_CMD, + WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_CMD, + WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_RESP_EVENT, + WMI_TLV_TAG_STRUCT_DCC_STATS_EVENT, + WMI_TLV_TAG_STRUCT_OCB_CHANNEL, + WMI_TLV_TAG_STRUCT_OCB_SCHEDULE_ELEMENT, + WMI_TLV_TAG_STRUCT_DCC_NDL_STATS_PER_CHANNEL, + WMI_TLV_TAG_STRUCT_DCC_NDL_CHAN, + WMI_TLV_TAG_STRUCT_QOS_PARAMETER, + WMI_TLV_TAG_STRUCT_DCC_NDL_ACTIVE_STATE_CONFIG, + WMI_TLV_TAG_STRUCT_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM, + WMI_TLV_TAG_STRUCT_ROAM_FILTER_FIXED_PARAM, + WMI_TLV_TAG_STRUCT_PASSPOINT_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PASSPOINT_EVENT_HDR, + WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_SSID_MATCH_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_TSF_TSTAMP_ACTION_CMD, + WMI_TLV_TAG_STRUCT_VDEV_TSF_REPORT_EVENT, + WMI_TLV_TAG_STRUCT_GET_FW_MEM_DUMP, + WMI_TLV_TAG_STRUCT_UPDATE_FW_MEM_DUMP, + WMI_TLV_TAG_STRUCT_FW_MEM_DUMP_PARAMS, + WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH, + WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH_COMPLETE, + WMI_TLV_TAG_STRUCT_PEER_SET_RATE_REPORT_CONDITION, + WMI_TLV_TAG_STRUCT_ROAM_SUBNET_CHANGE_CONFIG, + WMI_TLV_TAG_STRUCT_VDEV_SET_IE_CMD, + WMI_TLV_TAG_STRUCT_RSSI_BREACH_MONITOR_CONFIG, + WMI_TLV_TAG_STRUCT_RSSI_BREACH_EVENT, + WMI_TLV_TAG_STRUCT_EVENT_INITIAL_WAKEUP, + WMI_TLV_TAG_STRUCT_SOC_SET_PCL_CMD, + WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_CMD, + WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_SOC_HW_MODE_TRANSITION_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_TXRX_STREAMS, + WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY, + WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_IOAC_SOCK_PATTERN_T, + 
WMI_TLV_TAG_STRUCT_WOW_ENABLE_ICMPV6_NA_FLT_CMD, + WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_CONFIG, + WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_SUPPORTED_EVENT, + WMI_TLV_TAG_STRUCT_PACKET_FILTER_CONFIG, + WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE, + WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD, + WMI_TLV_TAG_STRUCT_MGMT_TX_CMD, + WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT, + WMI_TLV_TAG_STRUCT_SOC_SET_ANTENNA_MODE_CMD, + WMI_TLV_TAG_STRUCT_WOW_UDP_SVC_OFLD_CMD, + WMI_TLV_TAG_STRUCT_LRO_INFO_CMD, + WMI_TLV_TAG_STRUCT_ROAM_EARLYSTOP_RSSI_THRES_PARAM, + WMI_TLV_TAG_STRUCT_SERVICE_READY_EXT_EVENT, + WMI_TLV_TAG_STRUCT_MAWC_SENSOR_REPORT_IND_CMD, + WMI_TLV_TAG_STRUCT_MAWC_ENABLE_SENSOR_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_CONFIGURE_MAWC_CMD, + WMI_TLV_TAG_STRUCT_NLO_CONFIGURE_MAWC_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_MAWC_CMD, + WMI_TLV_TAG_STRUCT_PEER_ASSOC_CONF_EVENT, + WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_AP_PS_EGAP_PARAM_CMD, + WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_EVENT, + WMI_TLV_TAG_STRUCT_PMF_OFFLOAD_SET_SA_QUERY_CMD, + WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_CMD, + WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_SCPC_EVENT, + WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_CHAINMASK_LIST, + WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_BPF_GET_CAPABILITY_CMD, + WMI_TLV_TAG_STRUCT_BPF_CAPABILITY_INFO_EVT, + WMI_TLV_TAG_STRUCT_BPF_GET_VDEV_STATS_CMD, + WMI_TLV_TAG_STRUCT_BPF_VDEV_STATS_INFO_EVT, + WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_INSTRUCTIONS_CMD, + WMI_TLV_TAG_STRUCT_BPF_DEL_VDEV_INSTRUCTIONS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_DELETE_RESP_EVENT, + WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_DENSE_THRES_PARAM, + WMI_TLV_TAG_STRUCT_ENLO_CANDIDATE_SCORE_PARAM, + WMI_TLV_TAG_STRUCT_PEER_UPDATE_WDS_ENTRY_CMD, + WMI_TLV_TAG_STRUCT_VDEV_CONFIG_RATEMASK, + WMI_TLV_TAG_STRUCT_PDEV_FIPS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_CMD, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_SWITCH_TBL_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_CTL_TABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_MIMOGAIN_TABLE_CMD, + WMI_TLV_TAG_STRUCT_FWTEST_SET_PARAM_CMD, + WMI_TLV_TAG_STRUCT_PEER_ATF_REQUEST, + WMI_TLV_TAG_STRUCT_VDEV_ATF_REQUEST, + WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_CCK_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_OFDM_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_INST_RSSI_STATS_RESP, + WMI_TLV_TAG_STRUCT_MED_UTIL_REPORT_EVENT, + WMI_TLV_TAG_STRUCT_PEER_STA_PS_STATECHANGE_EVENT, + WMI_TLV_TAG_STRUCT_WDS_ADDR_EVENT, + WMI_TLV_TAG_STRUCT_PEER_RATECODE_LIST_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_TPC_EVENT, + WMI_TLV_TAG_STRUCT_ANI_OFDM_EVENT, + WMI_TLV_TAG_STRUCT_ANI_CCK_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_CHANNEL_HOPPING_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_FIPS_EVENT, + WMI_TLV_TAG_STRUCT_ATF_PEER_INFO, + WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CMD, + WMI_TLV_TAG_STRUCT_VDEV_FILTER_NRP_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_QBOOST_CFG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_GPIO_HANDLE, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM, + WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_CTRL_CHAIN, + WMI_TLV_TAG_STRUCT_PEER_CCK_OFDM_RATE_INFO, + WMI_TLV_TAG_STRUCT_PEER_MCS_RATE_INFO, + 
WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM, + WMI_TLV_TAG_STRUCT_MU_REPORT_TOTAL_MU, + WMI_TLV_TAG_STRUCT_VDEV_SET_DSCP_TID_MAP_CMD, + WMI_TLV_TAG_STRUCT_ROAM_SET_MBO, + WMI_TLV_TAG_STRUCT_MIB_STATS_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_CREATED_EVENT, + WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_DELETED_EVENT, + WMI_TLV_TAG_STRUCT_NAN_STARTED_CLUSTER_EVENT, + WMI_TLV_TAG_STRUCT_NAN_JOINED_CLUSTER_EVENT, + WMI_TLV_TAG_STRUCT_NDI_GET_CAP_REQ, + WMI_TLV_TAG_STRUCT_NDP_INITIATOR_REQ, + WMI_TLV_TAG_STRUCT_NDP_RESPONDER_REQ, + WMI_TLV_TAG_STRUCT_NDP_END_REQ, + WMI_TLV_TAG_STRUCT_NDI_CAP_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_INITIATOR_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_RESPONDER_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_END_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_INDICATION_EVENT, + WMI_TLV_TAG_STRUCT_NDP_CONFIRM_EVENT, + WMI_TLV_TAG_STRUCT_NDP_END_INDICATION_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_SET_QUIET_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_PCL_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_MODE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_HW_MODE_TRANSITION_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY, + WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_COEX_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_CONFIG_ENHANCED_MCAST_FILTER, + WMI_TLV_TAG_STRUCT_CHAN_AVOID_RPT_ALLOW_CMD, + WMI_TLV_TAG_STRUCT_SET_PERIODIC_CHANNEL_STATS_CONFIG, + WMI_TLV_TAG_STRUCT_VDEV_SET_CUSTOM_AGGR_SIZE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_WAL_POWER_DEBUG_CMD, + WMI_TLV_TAG_STRUCT_MAC_PHY_CAPABILITIES, + WMI_TLV_TAG_STRUCT_HW_MODE_CAPABILITIES, + WMI_TLV_TAG_STRUCT_SOC_MAC_PHY_HW_MODE_CAPS, + WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES_EXT, + WMI_TLV_TAG_STRUCT_SOC_HAL_REG_CAPABILITIES, + WMI_TLV_TAG_STRUCT_VDEV_WISA_CMD, + WMI_TLV_TAG_STRUCT_TX_POWER_LEVEL_STATS_EVT, + WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV, + WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_CONFIG, + WMI_TLV_TAG_STRUCT_WOW_SET_ACTION_WAKE_UP_CMD, + WMI_TLV_TAG_STRUCT_NDP_END_RSP_PER_NDI, + WMI_TLV_TAG_STRUCT_PEER_BWF_REQUEST, + WMI_TLV_TAG_STRUCT_BWF_PEER_INFO, + WMI_TLV_TAG_STRUCT_DBGLOG_TIME_STAMP_SYNC_CMD, + WMI_TLV_TAG_STRUCT_RMC_SET_LEADER_CMD, + WMI_TLV_TAG_STRUCT_RMC_MANUAL_LEADER_EVENT, + WMI_TLV_TAG_STRUCT_PER_CHAIN_RSSI_STATS, + WMI_TLV_TAG_STRUCT_RSSI_STATS, + WMI_TLV_TAG_STRUCT_P2P_LO_START_CMD, + WMI_TLV_TAG_STRUCT_P2P_LO_STOP_CMD, + WMI_TLV_TAG_STRUCT_P2P_LO_STOPPED_EVENT, + WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_SETUP_CMD, + WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_REMOVE_CMD, + WMI_TLV_TAG_STRUCT_SET_MULTIPLE_MCAST_FILTER_CMD, + WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT, + WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_CMD, + WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_REORDER_TIMEOUT_VAL_CMD, + WMI_TLV_TAG_STRUCT_PEER_SET_RX_BLOCKSIZE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_WAKEUP_CONFIG_CMDID, + WMI_TLV_TAG_STRUCT_TLV_BUF_LEN_PARAM, + WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT, + WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_REQ_CMD, + WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_EVENT, + WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO, + WMI_TLV_TAG_STRUCT_PDEV_GET_ANTDIV_STATUS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_ANTDIV_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_MNT_FILTER_CMD, + WMI_TLV_TAG_STRUCT_GET_CHIP_POWER_STATS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CHIP_POWER_STATS_EVENT, + 
WMI_TLV_TAG_STRUCT_COEX_GET_ANTENNA_ISOLATION_CMD, + WMI_TLV_TAG_STRUCT_COEX_REPORT_ISOLATION_EVENT, + WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS, + WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS, + WMI_TLV_TAG_STRUCT_TX_STATS, + WMI_TLV_TAG_STRUCT_PEER_AC_TX_STATS, + WMI_TLV_TAG_STRUCT_RX_STATS, + WMI_TLV_TAG_STRUCT_PEER_AC_RX_STATS, + WMI_TLV_TAG_STRUCT_REPORT_STATS_EVENT, + WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS_THRESH, + WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS_THRESH, + WMI_TLV_TAG_STRUCT_TX_STATS_THRESH, + WMI_TLV_TAG_STRUCT_RX_STATS_THRESH, + WMI_TLV_TAG_STRUCT_PDEV_SET_STATS_THRESHOLD_CMD, + WMI_TLV_TAG_STRUCT_REQUEST_WLAN_STATS_CMD, + WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_EVENT, + WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_INFO, + WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_BAND_TO_MAC, + WMI_TLV_TAG_STRUCT_TBTT_OFFSET_INFO, + WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EXT_EVENT, + WMI_TLV_TAG_STRUCT_SAR_LIMITS_CMD, + WMI_TLV_TAG_STRUCT_SAR_LIMIT_CMD_ROW, + WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ADFS_CH_CFG_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_ABORT_CMD, + WMI_TLV_TAG_STRUCT_PDEV_DFS_RADAR_DETECTION_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_DFS_CAC_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_VENDOR_OUI, + WMI_TLV_TAG_STRUCT_REQUEST_RCPI_CMD, + WMI_TLV_TAG_STRUCT_UPDATE_RCPI_EVENT, + WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD, + WMI_TLV_TAG_STRUCT_PEER_STATS_INFO, + WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT, + WMI_TLV_TAG_STRUCT_PKGID_EVENT, + WMI_TLV_TAG_STRUCT_CONNECTED_NLO_RSSI_PARAMS, + WMI_TLV_TAG_STRUCT_SET_CURRENT_COUNTRY_CMD, + WMI_TLV_TAG_STRUCT_REGULATORY_RULE_STRUCT, + WMI_TLV_TAG_STRUCT_REG_CHAN_LIST_CC_EVENT, + WMI_TLV_TAG_STRUCT_11D_SCAN_START_CMD, + WMI_TLV_TAG_STRUCT_11D_SCAN_STOP_CMD, + WMI_TLV_TAG_STRUCT_11D_NEW_COUNTRY_EVENT, + WMI_TLV_TAG_STRUCT_REQUEST_RADIO_CHAN_STATS_CMD, + WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS, + WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_PER_CONFIG, + WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_ACTIVE_MODE_CMD, + WMI_TLV_TAG_STRUCT_HW_DATA_FILTER_CMD, + WMI_TLV_TAG_STRUCT_CONNECTED_NLO_BSS_BAND_RSSI_PREF, + WMI_TLV_TAG_STRUCT_PEER_OPER_MODE_CHANGE_EVENT, + WMI_TLV_TAG_STRUCT_CHIP_POWER_SAVE_FAILURE_DETECTED, + WMI_TLV_TAG_STRUCT_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PKT_ROUTING_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_DIVERSITY_GAIN_CMD, + WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_COMBO, + WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_CAPABILITY, + WMI_TLV_TAG_STRUCT_VDEV_SET_ARP_STATS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_EVENT, + WMI_TLV_TAG_STRUCT_IFACE_OFFLOAD_STATS, + WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD_SUB_STRUCT_PARAM, + WMI_TLV_TAG_STRUCT_RSSI_CTL_EXT, + WMI_TLV_TAG_STRUCT_SINGLE_PHYERR_EXT_RX_HDR, + WMI_TLV_TAG_STRUCT_COEX_BT_ACTIVITY_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_GET_TX_POWER_CMD, + WMI_TLV_TAG_STRUCT_VDEV_TX_POWER_EVENT, + WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_COMPL_EVENT, + WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_SEND_CMD, + WMI_TLV_TAG_STRUCT_TX_SEND_PARAMS, + WMI_TLV_TAG_STRUCT_HE_RATE_SET, + 
WMI_TLV_TAG_STRUCT_CONGESTION_STATS, + WMI_TLV_TAG_STRUCT_SET_INIT_COUNTRY_CMD, + WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE, + WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE_PARAM_TLV, + WMI_TLV_TAG_STRUCT_PDEV_DIV_GET_RSSI_ANTID, + WMI_TLV_TAG_STRUCT_THERM_THROT_CONFIG_REQUEST, + WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_CONFIG_INFO, + WMI_TLV_TAG_STRUCT_THERM_THROT_STATS_EVENT, + WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_STATS_INFO, + WMI_TLV_TAG_STRUCT_PDEV_DIV_RSSI_ANTID_EVENT, + WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CAPABILITIES, + WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_REQ, + WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_RSP, + WMI_TLV_TAG_STRUCT_OEM_INDIRECT_DATA, + WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE, + WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE_ENTRY, + WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_REQUEST, + WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_LCA_DISALLOW_CONFIG_TLV_PARAM, + WMI_TLV_TAG_STRUCT_VDEV_LIMIT_OFFCHAN_CMD, + WMI_TLV_TAG_STRUCT_ROAM_RSSI_REJECTION_OCE_CONFIG_PARAM, + WMI_TLV_TAG_STRUCT_UNIT_TEST_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_FILS_OFFLOAD_TLV_PARAM, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PMK_CACHE_CMD, + WMI_TLV_TAG_STRUCT_PMK_CACHE, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_FILS_HLP_PKT_CMD, + WMI_TLV_TAG_STRUCT_ROAM_FILS_SYNCH_TLV_PARAM, + WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_EXTENDED_TLV_PARAM, + WMI_TLV_TAG_STRUCT_ROAM_BG_SCAN_ROAMING_PARAM, + WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_PARAMS_CMD, + WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_SET_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_OIC_PING_HANDOFF_EVENT, + WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_OFFLOAD_CMD, + WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_EVENT, + WMI_TLV_TAG_STRUCT_BTM_CONFIG, + WMI_TLV_TAG_STRUCT_DEBUG_MESG_FW_DATA_STALL_PARAM, + WMI_TLV_TAG_STRUCT_WLM_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_REQUEST, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_CND_SCORING_PARAM, + WMI_TLV_TAG_STRUCT_PDEV_CONFIG_VENDOR_OUI_ACTION, + WMI_TLV_TAG_STRUCT_VENDOR_OUI_EXT, + WMI_TLV_TAG_STRUCT_ROAM_SYNCH_FRAME_EVENT, + WMI_TLV_TAG_STRUCT_FD_SEND_FROM_HOST_CMD, + WMI_TLV_TAG_STRUCT_ENABLE_FILS_CMD, + WMI_TLV_TAG_STRUCT_HOST_SWFDA_EVENT, + + WMI_TLV_TAG_MAX +}; + +enum wmi_tlv_service { + WMI_TLV_SERVICE_BEACON_OFFLOAD = 0, + WMI_TLV_SERVICE_SCAN_OFFLOAD, + WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD, + WMI_TLV_SERVICE_BCN_MISS_OFFLOAD, + WMI_TLV_SERVICE_STA_PWRSAVE, + WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_TLV_SERVICE_AP_UAPSD, + WMI_TLV_SERVICE_AP_DFS, + WMI_TLV_SERVICE_11AC, + WMI_TLV_SERVICE_BLOCKACK, + WMI_TLV_SERVICE_PHYERR, + WMI_TLV_SERVICE_BCN_FILTER, + WMI_TLV_SERVICE_RTT, + WMI_TLV_SERVICE_WOW, + WMI_TLV_SERVICE_RATECTRL_CACHE, + WMI_TLV_SERVICE_IRAM_TIDS, + WMI_TLV_SERVICE_ARPNS_OFFLOAD, + WMI_TLV_SERVICE_NLO, + WMI_TLV_SERVICE_GTK_OFFLOAD, + WMI_TLV_SERVICE_SCAN_SCH, + WMI_TLV_SERVICE_CSA_OFFLOAD, + WMI_TLV_SERVICE_CHATTER, + WMI_TLV_SERVICE_COEX_FREQAVOID, + WMI_TLV_SERVICE_PACKET_POWER_SAVE, + WMI_TLV_SERVICE_FORCE_FW_HANG, + WMI_TLV_SERVICE_GPIO, + WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM, + WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_TLV_SERVICE_STA_KEEP_ALIVE, + WMI_TLV_SERVICE_TX_ENCAP, + WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, + WMI_TLV_SERVICE_EARLY_RX, + WMI_TLV_SERVICE_STA_SMPS, + WMI_TLV_SERVICE_FWTEST, + WMI_TLV_SERVICE_STA_WMMAC, + WMI_TLV_SERVICE_TDLS, + WMI_TLV_SERVICE_BURST, + WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE, + WMI_TLV_SERVICE_ADAPTIVE_OCS, + WMI_TLV_SERVICE_BA_SSN_SUPPORT, + WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE, + WMI_TLV_SERVICE_WLAN_HB, + 
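/*
 * Each enumerator here is a bit position in the service bitmap that the
 * firmware reports at boot; the NOTE below and the
 * WMI_TLV_EXT_SERVICE_IS_ENABLED()/SVCMAP() helpers that follow this
 * enum show how those bits are tested and translated into the driver's
 * generic WMI_SERVICE_* space.
 */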
WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT, + WMI_TLV_SERVICE_BATCH_SCAN, + WMI_TLV_SERVICE_QPOWER, + WMI_TLV_SERVICE_PLMREQ, + WMI_TLV_SERVICE_THERMAL_MGMT, + WMI_TLV_SERVICE_RMC, + WMI_TLV_SERVICE_MHF_OFFLOAD, + WMI_TLV_SERVICE_COEX_SAR, + WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE, + WMI_TLV_SERVICE_NAN, + WMI_TLV_SERVICE_L1SS_STAT, + WMI_TLV_SERVICE_ESTIMATE_LINKSPEED, + WMI_TLV_SERVICE_OBSS_SCAN, + WMI_TLV_SERVICE_TDLS_OFFCHAN, + WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA, + WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA, + WMI_TLV_SERVICE_IBSS_PWRSAVE, + WMI_TLV_SERVICE_LPASS, + WMI_TLV_SERVICE_EXTSCAN, + WMI_TLV_SERVICE_D0WOW, + WMI_TLV_SERVICE_HSOFFLOAD, + WMI_TLV_SERVICE_ROAM_HO_OFFLOAD, + WMI_TLV_SERVICE_RX_FULL_REORDER, + WMI_TLV_SERVICE_DHCP_OFFLOAD, + WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, + WMI_TLV_SERVICE_MDNS_OFFLOAD, + WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD, + WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT, + WMI_TLV_SERVICE_OCB, + WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD, + WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT, + WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD, + WMI_TLV_SERVICE_MGMT_TX_HTT, + WMI_TLV_SERVICE_MGMT_TX_WMI, + WMI_TLV_SERVICE_EXT_MSG, + WMI_TLV_SERVICE_MAWC, + WMI_TLV_SERVICE_PEER_ASSOC_CONF, + WMI_TLV_SERVICE_EGAP, + WMI_TLV_SERVICE_STA_PMF_OFFLOAD, + WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY, + WMI_TLV_SERVICE_ENHANCED_PROXY_STA, + WMI_TLV_SERVICE_ATF, + WMI_TLV_SERVICE_COEX_GPIO, + WMI_TLV_SERVICE_AUX_SPECTRAL_INTF, + WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, + WMI_TLV_SERVICE_ENTERPRISE_MESH, + WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_TLV_SERVICE_BPF_OFFLOAD, + WMI_TLV_SERVICE_SYNC_DELETE_CMDS, + WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES, + WMI_TLV_SERVICE_NAN_DATA, + WMI_TLV_SERVICE_NAN_RTT, + WMI_TLV_SERVICE_11AX, + WMI_TLV_SERVICE_DEPRECATED_REPLACE, + WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER, + WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, + WMI_TLV_SERVICE_MESH_11S, + WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT, + WMI_TLV_SERVICE_VDEV_RX_FILTER, + WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT, + WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET, + WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET, + WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER, + WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT, + WMI_TLV_SERVICE_WLAN_STATS_REPORT, + WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT, + WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD, + WMI_TLV_SERVICE_RCPI_SUPPORT, + WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT, + WMI_TLV_SERVICE_PEER_STATS_INFO, + WMI_TLV_SERVICE_REGULATORY_DB, + WMI_TLV_SERVICE_11D_OFFLOAD, + WMI_TLV_SERVICE_HW_DATA_FILTERING, + WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART, + WMI_TLV_SERVICE_PKT_ROUTING, + WMI_TLV_SERVICE_CHECK_CAL_VERSION, + WMI_TLV_SERVICE_OFFCHAN_TX_WMI, + WMI_TLV_SERVICE_8SS_TX_BFEE, + WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_TLV_SERVICE_ACK_TIMEOUT, + WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64, + WMI_TLV_MAX_SERVICE = 128, + +/* NOTE: + * The above service flags are delivered in the wmi_service_bitmap field + * of the WMI_TLV_SERVICE_READY_EVENT message. + * The below service flags are delivered in a WMI_TLV_SERVICE_AVAILABLE_EVENT + * message rather than in the WMI_TLV_SERVICE_READY_EVENT message's + * wmi_service_bitmap field. + * The WMI_TLV_SERVICE_AVAILABLE_EVENT message immediately precedes the + * WMI_TLV_SERVICE_READY_EVENT message. 
+ */
+
+	WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+	WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT,
+	WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT,
+	WMI_TLV_SERVICE_FILS_SUPPORT,
+	WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD,
+	WMI_TLV_SERVICE_WLAN_DHCP_RENEW,
+	WMI_TLV_SERVICE_MAWC_SUPPORT,
+	WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG,
+	WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT,
+	WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT,
+	WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT,
+	WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT,
+	WMI_TLV_SERVICE_THERM_THROT,
+	WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT,
+	WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN,
+	WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+	WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+	WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+	WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+	WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+	WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+	WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+	WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+	WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+	WMI_TLV_SERVICE_STA_TWT = 152,
+	WMI_TLV_SERVICE_AP_TWT = 153,
+	WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+	WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+	WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+	WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+	WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+	WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+	WMI_TLV_SERVICE_MOTION_DET = 160,
+	WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+	WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+	WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+	WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+	WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+	WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+	WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+	WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+	WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+	WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+	WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+	WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+	WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+	WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+	WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+	WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+	WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+	WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+	WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
+
+	WMI_TLV_MAX_EXT_SERVICE = 256,
+};
+
+#define WMI_TLV_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+	((svc_id) < (WMI_TLV_MAX_EXT_SERVICE) && \
+	 (svc_id) >= (len) && \
+	 __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 32]) & \
+	 BIT(((((svc_id) - (len)) % 32) & 0x1f)))
+
+#define SVCMAP(x, y, len) \
+	do { \
+		if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \
+		    (WMI_TLV_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \
+			__set_bit(y, out); \
+	} while (0)
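/*
 * Worked example of the extended-service check above (illustrative
 * values): for svc_id = WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT (155) and
 * len = WMI_TLV_MAX_SERVICE (128), the macro tests word
 * (155 - 128) / 32 = 0 of the extended bitmap against
 * BIT((155 - 128) % 32) = BIT(27). The (len) guard keeps IDs below 128
 * out of this path; those are looked up by WMI_SERVICE_IS_ENABLED()
 * instead.
 */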
+static inline void
+wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
+{
+	SVCMAP(WMI_TLV_SERVICE_BEACON_OFFLOAD,
+	       WMI_SERVICE_BEACON_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_SCAN_OFFLOAD,
+	       WMI_SERVICE_SCAN_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+	       WMI_SERVICE_ROAM_SCAN_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+	       WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_PWRSAVE,
+	       WMI_SERVICE_STA_PWRSAVE, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+	       WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+	SVCMAP(WMI_TLV_SERVICE_AP_UAPSD,
+	       WMI_SERVICE_AP_UAPSD, len);
+	SVCMAP(WMI_TLV_SERVICE_AP_DFS,
+	       WMI_SERVICE_AP_DFS, len);
+	SVCMAP(WMI_TLV_SERVICE_11AC,
+	       WMI_SERVICE_11AC, len);
+	SVCMAP(WMI_TLV_SERVICE_BLOCKACK,
+	       WMI_SERVICE_BLOCKACK, len);
+	SVCMAP(WMI_TLV_SERVICE_PHYERR,
+	       WMI_SERVICE_PHYERR, len);
+	SVCMAP(WMI_TLV_SERVICE_BCN_FILTER,
+	       WMI_SERVICE_BCN_FILTER, len);
+	SVCMAP(WMI_TLV_SERVICE_RTT,
+	       WMI_SERVICE_RTT, len);
+	SVCMAP(WMI_TLV_SERVICE_WOW,
+	       WMI_SERVICE_WOW, len);
+	SVCMAP(WMI_TLV_SERVICE_RATECTRL_CACHE,
+	       WMI_SERVICE_RATECTRL_CACHE, len);
+	SVCMAP(WMI_TLV_SERVICE_IRAM_TIDS,
+	       WMI_SERVICE_IRAM_TIDS, len);
+	SVCMAP(WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+	       WMI_SERVICE_ARPNS_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_NLO,
+	       WMI_SERVICE_NLO, len);
+	SVCMAP(WMI_TLV_SERVICE_GTK_OFFLOAD,
+	       WMI_SERVICE_GTK_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_SCAN_SCH,
+	       WMI_SERVICE_SCAN_SCH, len);
+	SVCMAP(WMI_TLV_SERVICE_CSA_OFFLOAD,
+	       WMI_SERVICE_CSA_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_CHATTER,
+	       WMI_SERVICE_CHATTER, len);
+	SVCMAP(WMI_TLV_SERVICE_COEX_FREQAVOID,
+	       WMI_SERVICE_COEX_FREQAVOID, len);
+	SVCMAP(WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+	       WMI_SERVICE_PACKET_POWER_SAVE, len);
+	SVCMAP(WMI_TLV_SERVICE_FORCE_FW_HANG,
+	       WMI_SERVICE_FORCE_FW_HANG, len);
+	SVCMAP(WMI_TLV_SERVICE_GPIO,
+	       WMI_SERVICE_GPIO, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+	       WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+	       WMI_SERVICE_STA_KEEP_ALIVE, len);
+	SVCMAP(WMI_TLV_SERVICE_TX_ENCAP,
+	       WMI_SERVICE_TX_ENCAP, len);
+	SVCMAP(WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+	       WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+	SVCMAP(WMI_TLV_SERVICE_EARLY_RX,
+	       WMI_SERVICE_EARLY_RX, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_SMPS,
+	       WMI_SERVICE_STA_SMPS, len);
+	SVCMAP(WMI_TLV_SERVICE_FWTEST,
+	       WMI_SERVICE_FWTEST, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_WMMAC,
+	       WMI_SERVICE_STA_WMMAC, len);
+	SVCMAP(WMI_TLV_SERVICE_TDLS,
+	       WMI_SERVICE_TDLS, len);
+	SVCMAP(WMI_TLV_SERVICE_BURST,
+	       WMI_SERVICE_BURST, len);
+	SVCMAP(WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+	       WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, len);
+	SVCMAP(WMI_TLV_SERVICE_ADAPTIVE_OCS,
+	       WMI_SERVICE_ADAPTIVE_OCS, len);
+	SVCMAP(WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+	       WMI_SERVICE_BA_SSN_SUPPORT, len);
+	SVCMAP(WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+	       WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE, len);
+	SVCMAP(WMI_TLV_SERVICE_WLAN_HB,
+	       WMI_SERVICE_WLAN_HB, len);
+	SVCMAP(WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+	       WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, len);
+	SVCMAP(WMI_TLV_SERVICE_BATCH_SCAN,
+	       WMI_SERVICE_BATCH_SCAN, len);
+	SVCMAP(WMI_TLV_SERVICE_QPOWER,
+	       WMI_SERVICE_QPOWER, len);
+	SVCMAP(WMI_TLV_SERVICE_PLMREQ,
+	       WMI_SERVICE_PLMREQ, len);
+	SVCMAP(WMI_TLV_SERVICE_THERMAL_MGMT,
+	       WMI_SERVICE_THERMAL_MGMT, len);
+	SVCMAP(WMI_TLV_SERVICE_RMC,
+	       WMI_SERVICE_RMC, len);
+	SVCMAP(WMI_TLV_SERVICE_MHF_OFFLOAD,
+	       WMI_SERVICE_MHF_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_COEX_SAR,
+	       WMI_SERVICE_COEX_SAR, len);
+	SVCMAP(WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+	       WMI_SERVICE_BCN_TXRATE_OVERRIDE, len);
+	SVCMAP(WMI_TLV_SERVICE_NAN,
+	       WMI_SERVICE_NAN, len);
+	SVCMAP(WMI_TLV_SERVICE_L1SS_STAT,
+	       WMI_SERVICE_L1SS_STAT, len);
+	SVCMAP(WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+	       WMI_SERVICE_ESTIMATE_LINKSPEED, len);
+	SVCMAP(WMI_TLV_SERVICE_OBSS_SCAN,
+	       WMI_SERVICE_OBSS_SCAN, len);
+	SVCMAP(WMI_TLV_SERVICE_TDLS_OFFCHAN,
+	       WMI_SERVICE_TDLS_OFFCHAN, len);
+	SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+	       WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
+	SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+	       WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
+	SVCMAP(WMI_TLV_SERVICE_IBSS_PWRSAVE,
+	       WMI_SERVICE_IBSS_PWRSAVE, len);
+	SVCMAP(WMI_TLV_SERVICE_LPASS,
+	       WMI_SERVICE_LPASS, len);
+	SVCMAP(WMI_TLV_SERVICE_EXTSCAN,
+	       WMI_SERVICE_EXTSCAN, len);
+	SVCMAP(WMI_TLV_SERVICE_D0WOW,
+	       WMI_SERVICE_D0WOW, len);
+	SVCMAP(WMI_TLV_SERVICE_HSOFFLOAD,
+	       WMI_SERVICE_HSOFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+	       WMI_SERVICE_ROAM_HO_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_RX_FULL_REORDER,
+	       WMI_SERVICE_RX_FULL_REORDER, len);
+	SVCMAP(WMI_TLV_SERVICE_DHCP_OFFLOAD,
+	       WMI_SERVICE_DHCP_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+	       WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, len);
+	SVCMAP(WMI_TLV_SERVICE_MDNS_OFFLOAD,
+	       WMI_SERVICE_MDNS_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+	       WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI,
+	       WMI_SERVICE_MGMT_TX_WMI, len);
+	SVCMAP(WMI_TLV_SERVICE_MESH_11S,
+	       WMI_SERVICE_MESH_11S, len);
+	SVCMAP(WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+	       WMI_SERVICE_SYNC_DELETE_CMDS, len);
+	SVCMAP(WMI_TLV_SERVICE_PEER_STATS_INFO,
+	       WMI_SERVICE_PEER_STATS, len);
+}
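/*
 * A sketch of how this mapper is typically driven (the variable names
 * here are illustrative): the firmware's __le32 service bitmap from the
 * service ready / service available events is translated into the
 * driver's WMI_SERVICE_* bit space, after which individual services can
 * be tested with test_bit():
 *
 *	DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
 *
 *	wmi_tlv_svc_map(bitmap_from_fw, svc_map, num_legacy_service_bits);
 *	if (test_bit(WMI_SERVICE_MGMT_TX_WMI, svc_map))
 *		...
 */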
+static inline void
+wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
+{
+	SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
+	       WMI_SERVICE_SPOOF_MAC_SUPPORT,
+	       WMI_TLV_MAX_SERVICE);
+	SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
+	       WMI_SERVICE_THERM_THROT,
+	       WMI_TLV_MAX_SERVICE);
+	SVCMAP(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
+	       WMI_SERVICE_TX_DATA_ACK_RSSI, WMI_TLV_MAX_SERVICE);
+	SVCMAP(WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS,
+	       WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+	       WMI_TLV_MAX_SERVICE);
+}
+
+#undef SVCMAP
+
+struct wmi_tlv {
+	__le16 len;
+	__le16 tag;
+	u8 value[];
+} __packed;
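/*
 * Every WMI TLV message body is a flat sequence of these headers, each
 * followed by len bytes of payload and then the next header. A minimal
 * walk over such a buffer might look like the following (illustrative
 * only; a real parser must also validate tag-specific lengths before
 * touching the payload):
 *
 *	const u8 *ptr = buf;
 *
 *	while (buf + buf_len - ptr >= sizeof(struct wmi_tlv)) {
 *		const struct wmi_tlv *tlv = (const void *)ptr;
 *		u16 len = __le16_to_cpu(tlv->len);
 *
 *		... dispatch on __le16_to_cpu(tlv->tag) ...
 *		ptr += sizeof(*tlv) + len;
 *	}
 */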
+struct ath10k_mgmt_tx_pkt_addr {
+	void *vaddr;
+	dma_addr_t paddr;
+};
+
+struct chan_info_params {
+	u32 err_code;
+	u32 freq;
+	u32 cmd_flags;
+	u32 noise_floor;
+	u32 rx_clear_count;
+	u32 cycle_count;
+	u32 mac_clk_mhz;
+};
+
+#define WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL	BIT(9)
+
+struct wmi_tlv_chan_info_event {
+	__le32 err_code;
+	__le32 freq;
+	__le32 cmd_flags;
+	__le32 noise_floor;
+	__le32 rx_clear_count;
+	__le32 cycle_count;
+	__le32 chan_tx_pwr_range;
+	__le32 chan_tx_pwr_tp;
+	__le32 rx_frame_count;
+	__le32 my_bss_rx_cycle_count;
+	__le32 rx_11b_mode_data_duration;
+	__le32 tx_frame_cnt;
+	__le32 mac_clk_mhz;
+} __packed;
+
+struct wmi_tlv_mgmt_tx_compl_ev {
+	__le32 desc_id;
+	__le32 status;
+	__le32 pdev_id;
+	__le32 ppdu_id;
+	__le32 ack_rssi;
+};
+
+#define WMI_TLV_MGMT_RX_NUM_RSSI 4
+
+struct wmi_tlv_mgmt_rx_ev {
+	__le32 channel;
+	__le32 snr;
+	__le32 rate;
+	__le32 phy_mode;
+	__le32 buf_len;
+	__le32 status;
+	__le32 rssi[WMI_TLV_MGMT_RX_NUM_RSSI];
+} __packed;
+
+struct wmi_tlv_abi_version {
+	__le32 abi_ver0;
+	__le32 abi_ver1;
+	__le32 abi_ver_ns0;
+	__le32 abi_ver_ns1;
+	__le32 abi_ver_ns2;
+	__le32 abi_ver_ns3;
+} __packed;
+
+enum wmi_tlv_hw_bd_id {
+	WMI_TLV_HW_BD_LEGACY = 0,
+	WMI_TLV_HW_BD_QCA6174 = 1,
+	WMI_TLV_HW_BD_QCA2582 = 2,
+};
+
+struct wmi_tlv_hw_bd_info {
+	u8 rev;
+	u8 project_id;
+	u8 custom_id;
+	u8 reference_design_id;
+} __packed;
+
+struct wmi_tlv_svc_rdy_ev {
+	__le32 fw_build_vers;
+	struct wmi_tlv_abi_version abi;
+	__le32 phy_capability;
+	__le32 max_frag_entry;
+	__le32 num_rf_chains;
+	__le32 ht_cap_info;
+	__le32 vht_cap_info;
+	__le32 vht_supp_mcs;
+	__le32 hw_min_tx_power;
+	__le32 hw_max_tx_power;
+	__le32 sys_cap_info;
+	__le32 min_pkt_size_enable;
+	__le32 max_bcn_ie_size;
+	__le32 num_mem_reqs;
+	__le32 max_num_scan_chans;
+	__le32 hw_bd_id; /* 0 means hw_bd_info is invalid */
+	struct wmi_tlv_hw_bd_info hw_bd_info[5];
+} __packed;
+
+struct wmi_tlv_rdy_ev {
+	struct wmi_tlv_abi_version abi;
+	struct wmi_mac_addr mac_addr;
+	__le32 status;
+} __packed;
+
+struct wmi_tlv_resource_config {
+	__le32 num_vdevs;
+	__le32 num_peers;
+	__le32 num_offload_peers;
+	__le32 num_offload_reorder_bufs;
+	__le32 num_peer_keys;
+	__le32 num_tids;
+	__le32 ast_skid_limit;
+	__le32 tx_chain_mask;
+	__le32 rx_chain_mask;
+	__le32 rx_timeout_pri[4];
+	__le32 rx_decap_mode;
+	__le32 scan_max_pending_reqs;
+	__le32 bmiss_offload_max_vdev;
+	__le32 roam_offload_max_vdev;
+	__le32 roam_offload_max_ap_profiles;
+	__le32 num_mcast_groups;
+	__le32 num_mcast_table_elems;
+	__le32 mcast2ucast_mode;
+	__le32 tx_dbg_log_size;
+	__le32 num_wds_entries;
+	__le32 dma_burst_size;
+	__le32 mac_aggr_delim;
+	__le32 rx_skip_defrag_timeout_dup_detection_check;
+	__le32 vow_config;
+	__le32 gtk_offload_max_vdev;
+	__le32 num_msdu_desc;
+	__le32 max_frag_entries;
+	__le32 num_tdls_vdevs;
+	__le32 num_tdls_conn_table_entries;
+	__le32 beacon_tx_offload_max_vdev;
+	__le32 num_multicast_filter_entries;
+	__le32 num_wow_filters;
+	__le32 num_keep_alive_pattern;
+	__le32 keep_alive_pattern_size;
+	__le32 max_tdls_concurrent_sleep_sta;
+	__le32 max_tdls_concurrent_buffer_sta;
+	__le32 wmi_send_separate;
+	__le32 num_ocb_vdevs;
+	__le32 num_ocb_channels;
+	__le32 num_ocb_schedules;
+	__le32 host_capab;
+} __packed;
+
+/* structure describing a host memory chunk. */
+struct host_memory_chunk_tlv {
+	/* id of the request that is passed up in service ready */
+	__le32 req_id;
+
+	/* the physical address of the memory chunk */
+	__le32 ptr;
+
+	/* size of the chunk */
+	__le32 size;
+
+	/* upper 32 bits of the address; valid only for targets with
+	 * more than 32 bit addressing
+	 */
+	__le32 ptr_high;
+} __packed;
+
+struct wmi_tlv_init_cmd {
+	struct wmi_tlv_abi_version abi;
+	__le32 num_host_mem_chunks;
+} __packed;
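/*
 * The init command above is followed in the same message by one
 * host_memory_chunk_tlv per chunk. A sketch of how the host side might
 * fill one entry from a DMA allocation (variable names illustrative):
 *
 *	chunk->req_id = __cpu_to_le32(mem_req_id);
 *	chunk->ptr = __cpu_to_le32(lower_32_bits(paddr));
 *	chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
 *	chunk->size = __cpu_to_le32(chunk_len);
 */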
+ */ + __le32 prob_req_oui; +} __packed; + +struct wmi_tlv_start_scan_cmd { + struct wmi_start_scan_common common; + __le32 burst_duration_ms; + __le32 num_channels; + __le32 num_bssids; + __le32 num_ssids; + __le32 ie_len; + __le32 num_probes; + struct wmi_mac_addr mac_addr; + struct wmi_mac_addr mac_mask; +} __packed; + +enum wmi_tlv_vdev_subtype { + WMI_TLV_VDEV_SUBTYPE_NONE = 0, + WMI_TLV_VDEV_SUBTYPE_P2P_DEV = 1, + WMI_TLV_VDEV_SUBTYPE_P2P_CLI = 2, + WMI_TLV_VDEV_SUBTYPE_P2P_GO = 3, + WMI_TLV_VDEV_SUBTYPE_PROXY_STA = 4, + WMI_TLV_VDEV_SUBTYPE_MESH = 5, + WMI_TLV_VDEV_SUBTYPE_MESH_11S = 6, +}; + +struct wmi_tlv_vdev_start_cmd { + __le32 vdev_id; + __le32 requestor_id; + __le32 bcn_intval; + __le32 dtim_period; + __le32 flags; + struct wmi_ssid ssid; + __le32 bcn_tx_rate; + __le32 bcn_tx_power; + __le32 num_noa_descr; + __le32 disable_hw_ack; +} __packed; + +enum { + WMI_TLV_PEER_TYPE_DEFAULT = 0, /* generic / non-BSS / self-peer */ + WMI_TLV_PEER_TYPE_BSS = 1, + WMI_TLV_PEER_TYPE_TDLS = 2, + WMI_TLV_PEER_TYPE_HOST_MAX = 127, + WMI_TLV_PEER_TYPE_ROAMOFFLOAD_TMP = 128, +}; + +struct wmi_tlv_peer_create_cmd { + __le32 vdev_id; + struct wmi_mac_addr peer_addr; + __le32 peer_type; +} __packed; + +struct wmi_tlv_peer_assoc_cmd { + struct wmi_mac_addr mac_addr; + __le32 vdev_id; + __le32 new_assoc; + __le32 assoc_id; + __le32 flags; + __le32 caps; + __le32 listen_intval; + __le32 ht_caps; + __le32 max_mpdu; + __le32 mpdu_density; + __le32 rate_caps; + __le32 nss; + __le32 vht_caps; + __le32 phy_mode; + __le32 ht_info[2]; + __le32 num_legacy_rates; + __le32 num_ht_rates; +} __packed; + +struct wmi_tlv_pdev_suspend { + __le32 pdev_id; /* not used yet */ + __le32 opt; +} __packed; + +struct wmi_tlv_pdev_set_wmm_cmd { + __le32 pdev_id; /* not used yet */ + __le32 dg_type; /* no idea.. 
*/ +} __packed; + +struct wmi_tlv_vdev_wmm_params { + __le32 dummy; + struct wmi_wmm_params params; +} __packed; + +struct wmi_tlv_vdev_set_wmm_cmd { + __le32 vdev_id; + struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4]; +} __packed; + +struct wmi_tlv_phyerr_ev { + __le32 num_phyerrs; + __le32 tsf_l32; + __le32 tsf_u32; + __le32 buf_len; +} __packed; + +enum wmi_tlv_dbglog_param { + WMI_TLV_DBGLOG_PARAM_LOG_LEVEL = 1, + WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE, + WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE, + WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE_BITMAP, + WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE_BITMAP, +}; + +enum wmi_tlv_dbglog_log_level { + WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE = 0, + WMI_TLV_DBGLOG_LOG_LEVEL_INFO, + WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_1, + WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_2, + WMI_TLV_DBGLOG_LOG_LEVEL_WARN, + WMI_TLV_DBGLOG_LOG_LEVEL_ERR, +}; + +#define WMI_TLV_DBGLOG_BITMAP_MAX_IDS 512 +#define WMI_TLV_DBGLOG_BITMAP_MAX_WORDS (WMI_TLV_DBGLOG_BITMAP_MAX_IDS / \ + sizeof(__le32)) +#define WMI_TLV_DBGLOG_ALL_MODULES 0xffff +#define WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(module_id, log_level) \ + (((module_id << 16) & 0xffff0000) | \ + ((log_level << 0) & 0x000000ff)) + +struct wmi_tlv_dbglog_cmd { + __le32 param; + __le32 value; +} __packed; + +struct wmi_tlv_resume_cmd { + __le32 reserved; +} __packed; + +struct wmi_tlv_req_stats_cmd { + __le32 stats_id; /* wmi_stats_id */ + __le32 vdev_id; + struct wmi_mac_addr peer_macaddr; +} __packed; + +#define WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT 31 +#define WMI_TLV_PEER_RX_DURATION_HIGH_MASK GENMASK(30, 0) +#define WMI_TLV_PEER_RX_DURATION_SHIFT 32 + +struct wmi_tlv_peer_stats_extd { + struct wmi_mac_addr peer_macaddr; + __le32 rx_duration; + __le32 peer_tx_bytes; + __le32 peer_rx_bytes; + __le32 last_tx_rate_code; + __le32 last_tx_power; + __le32 rx_mc_bc_cnt; + __le32 rx_duration_high; + __le32 reserved[2]; +} __packed; + +struct wmi_tlv_vdev_stats { + __le32 vdev_id; + __le32 beacon_snr; + __le32 data_snr; + __le32 num_tx_frames[4]; /* per-AC */ + __le32 num_rx_frames; + __le32 num_tx_frames_retries[4]; + __le32 num_tx_frames_failures[4]; + __le32 num_rts_fail; + __le32 num_rts_success; + __le32 num_rx_err; + __le32 num_rx_discard; + __le32 num_tx_not_acked; + __le32 tx_rate_history[10]; + __le32 beacon_rssi_history[10]; +} __packed; + +struct wmi_tlv_pktlog_enable { + __le32 reserved; + __le32 filter; +} __packed; + +struct wmi_tlv_pktlog_disable { + __le32 reserved; +} __packed; + +enum wmi_tlv_bcn_tx_status { + WMI_TLV_BCN_TX_STATUS_OK, + WMI_TLV_BCN_TX_STATUS_XRETRY, + WMI_TLV_BCN_TX_STATUS_DROP, + WMI_TLV_BCN_TX_STATUS_FILTERED, +}; + +struct wmi_tlv_bcn_tx_status_ev { + __le32 vdev_id; + __le32 tx_status; +} __packed; + +struct wmi_tlv_bcn_prb_info { + __le32 caps; + __le32 erp; + u8 ies[]; +} __packed; + +struct wmi_tlv_bcn_tmpl_cmd { + __le32 vdev_id; + __le32 tim_ie_offset; + __le32 buf_len; +} __packed; + +struct wmi_tlv_prb_tmpl_cmd { + __le32 vdev_id; + __le32 buf_len; +} __packed; + +struct wmi_tlv_p2p_go_bcn_ie { + __le32 vdev_id; + __le32 ie_len; +} __packed; + +enum wmi_tlv_diag_item_type { + WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT, + WMI_TLV_DIAG_ITEM_TYPE_FW_LOG, + WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG, +}; + +struct wmi_tlv_diag_item { + u8 type; + u8 reserved; + __le16 len; + __le32 timestamp; + __le32 code; + u8 payload[]; +} __packed; + +struct wmi_tlv_diag_data_ev { + __le32 num_items; +} __packed; + +struct wmi_tlv_sta_keepalive_cmd { + __le32 vdev_id; + __le32 enabled; + __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */ + __le32 interval; /* in seconds 
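+				   (e.g. enabled = 1 with interval = 30
+				    would send one keepalive every
+				    30 seconds - illustrative values)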
*/
+} __packed;
+
+struct wmi_tlv_stats_ev {
+	__le32 stats_id; /* WMI_STAT_ */
+	__le32 num_pdev_stats;
+	__le32 num_vdev_stats;
+	__le32 num_peer_stats;
+	__le32 num_bcnflt_stats;
+	__le32 num_chan_stats;
+	__le32 num_mib_stats;
+	__le32 pdev_id;
+	__le32 num_bcn_stats;
+	__le32 num_peer_stats_extd;
+} __packed;
+
+struct wmi_tlv_peer_stats_info_ev {
+	__le32 vdev_id;
+	__le32 num_peers;
+	__le32 more_data;
+} __packed;
+
+#define WMI_TLV_MAX_CHAINS 8
+
+struct wmi_tlv_peer_stats_info {
+	struct wmi_mac_addr peer_macaddr;
+	struct {
+		/* lower 32 bits of the tx_bytes value */
+		__le32 low_32;
+		/* upper 32 bits of the tx_bytes value */
+		__le32 high_32;
+	} __packed tx_bytes;
+	struct {
+		/* lower 32 bits of the tx_packets value */
+		__le32 low_32;
+		/* upper 32 bits of the tx_packets value */
+		__le32 high_32;
+	} __packed tx_packets;
+	struct {
+		/* lower 32 bits of the rx_bytes value */
+		__le32 low_32;
+		/* upper 32 bits of the rx_bytes value */
+		__le32 high_32;
+	} __packed rx_bytes;
+	struct {
+		/* lower 32 bits of the rx_packets value */
+		__le32 low_32;
+		/* upper 32 bits of the rx_packets value */
+		__le32 high_32;
+	} __packed rx_packets;
+	__le32 tx_retries;
+	__le32 tx_failed;
+
+	/* rate information; this is the output of WMI_ASSEMBLE_RATECODE_V1
+	 * (in the format 0x1000RRRR). The rate-code is a 4-byte field
+	 * encoding, for a given rate, the nss and preamble:
+	 *
+	 * b'31-b'29 unused / reserved
+	 * b'28      indicates the version of the rate-code (1 = RATECODE_V1)
+	 * b'27-b'11 unused / reserved
+	 * b'10-b'8  indicate the preamble (0 OFDM, 1 CCK, 2 HT, 3 VHT)
+	 * b'7-b'5   indicate the NSS (0 - 1x1, 1 - 2x2, 2 - 3x3, 3 - 4x4)
+	 * b'4-b'0   indicate the rate, as follows:
+	 *   OFDM:            0: OFDM 48 Mbps
+	 *                    1: OFDM 24 Mbps
+	 *                    2: OFDM 12 Mbps
+	 *                    3: OFDM 6 Mbps
+	 *                    4: OFDM 54 Mbps
+	 *                    5: OFDM 36 Mbps
+	 *                    6: OFDM 18 Mbps
+	 *                    7: OFDM 9 Mbps
+	 *   CCK (pream == 1):
+	 *                    0: CCK 11 Mbps Long
+	 *                    1: CCK 5.5 Mbps Long
+	 *                    2: CCK 2 Mbps Long
+	 *                    3: CCK 1 Mbps Long
+	 *                    4: CCK 11 Mbps Short
+	 *                    5: CCK 5.5 Mbps Short
+	 *                    6: CCK 2 Mbps Short
+	 *   HT/VHT (pream == 2/3):
+	 *                    0..7:  MCS0..MCS7 (HT)
+	 *                    0..9:  MCS0..MCS9 (11AC VHT)
+	 *                    0..11: MCS0..MCS11 (11AX HE)
+	 *
+	 * last_tx_rate_code is the rate-code of the last transmission.
+	 */
+	__le32 last_tx_rate_code;
+	__le32 last_rx_rate_code;
+	__le32 last_tx_bitrate_kbps;
+	__le32 last_rx_bitrate_kbps;
+	__le32 peer_rssi;
+	__le32 tx_succeed;
+	__le32 peer_rssi_per_chain[WMI_TLV_MAX_CHAINS];
+} __packed;
+
+#define HW_RATECODE_PREAM_V1_MASK	GENMASK(10, 8)
+#define WMI_TLV_GET_HW_RC_PREAM_V1(rc)	FIELD_GET(HW_RATECODE_PREAM_V1_MASK, rc)
+
+#define HW_RATECODE_NSS_V1_MASK		GENMASK(7, 5)
+#define WMI_TLV_GET_HW_RC_NSS_V1(rc)	FIELD_GET(HW_RATECODE_NSS_V1_MASK, rc)
+
+#define HW_RATECODE_RATE_V1_MASK	GENMASK(4, 0)
+#define WMI_TLV_GET_HW_RC_RATE_V1(rc)	FIELD_GET(HW_RATECODE_RATE_V1_MASK, rc)
+
+struct wmi_tlv_p2p_noa_ev {
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_tlv_roam_ev {
+	__le32 vdev_id;
+	__le32 reason;
+	__le32 rssi;
+} __packed;
+
+struct wmi_tlv_wow_add_del_event_cmd {
+	__le32 vdev_id;
+	__le32 is_add;
+	__le32 event_bitmap;
+} __packed;
+
+struct wmi_tlv_request_peer_stats_info {
+	__le32 request_type;
+	__le32 vdev_id;
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+	__le32 reset_after_request;
+} __packed;
+
+/* Command to set/unset chip in quiet mode */
+struct wmi_tlv_set_quiet_cmd {
+	__le32 vdev_id;
+
+	/* in TUs */
+	__le32 period;
+
+	/* in TUs */
+	__le32 duration;
+
+	/* offset in TUs */
+	__le32 next_start;
+	__le32 enabled;
+} __packed;
+
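The V1 rate-code layout described above is easiest to see decoded. A minimal
user-space sketch, with the GENMASK()/FIELD_GET() based macros open-coded so it
runs outside the kernel; the rate-code value and printed labels are
illustrative only:

#include <stdio.h>
#include <stdint.h>

/* open-coded equivalent of the FIELD_GET()-based accessors above */
static unsigned int rc_field(uint32_t rc, int hi, int lo)
{
	return (rc >> lo) & ((1u << (hi - lo + 1)) - 1);
}

int main(void)
{
	static const char * const pream[] = { "OFDM", "CCK", "HT", "VHT" };
	uint32_t rc = 0x10000247;	/* example: V1, HT preamble, 3x3, MCS7 */

	if (!(rc & (1u << 28)))		/* b'28 must flag RATECODE_V1 */
		return 1;

	printf("pream=%s nss=%u rate=%u\n",
	       pream[rc_field(rc, 10, 8) & 3],	/* b'10-b'8 */
	       rc_field(rc, 7, 5) + 1,		/* b'7-b'5, 0 means 1x1 */
	       rc_field(rc, 4, 0));		/* b'4-b'0 */
	return 0;
}

+enum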
wmi_tlv_wow_interface_cfg {
+	WOW_IFACE_PAUSE_ENABLED,
+	WOW_IFACE_PAUSE_DISABLED
+};
+
+struct wmi_tlv_wow_enable_cmd {
+	__le32 enable;
+	__le32 pause_iface_config;
+	__le32 flags;
+} __packed;
+
+struct wmi_tlv_wow_host_wakeup_ind {
+	__le32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+	__le32 vdev_id;
+	__le32 flag;
+	__le32 wake_reason;
+	__le32 data_len;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+	WOW_PATTERN_MIN = 0,
+	WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+	WOW_IPV4_SYNC_PATTERN,
+	WOW_IPV6_SYNC_PATTERN,
+	WOW_WILD_CARD_PATTERN,
+	WOW_TIMER_PATTERN,
+	WOW_MAGIC_PATTERN,
+	WOW_IPV6_RA_PATTERN,
+	WOW_IOAC_PKT_PATTERN,
+	WOW_IOAC_TMR_PATTERN,
+	WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE	148
+#define WOW_DEFAULT_BITMASK_SIZE	148
+
+struct wmi_tlv_wow_bitmap_pattern {
+	u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+	u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+	__le32 pattern_offset;
+	__le32 pattern_len;
+	__le32 bitmask_len;
+	__le32 pattern_id;
+} __packed;
+
+struct wmi_tlv_wow_add_pattern_cmd {
+	__le32 vdev_id;
+	__le32 pattern_id;
+	__le32 pattern_type;
+} __packed;
+
+struct wmi_tlv_wow_del_pattern_cmd {
+	__le32 vdev_id;
+	__le32 pattern_id;
+	__le32 pattern_type;
+} __packed;
+
+/* TDLS Options */
+enum wmi_tlv_tdls_options {
+	WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
+	WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
+	WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+struct wmi_tdls_set_state_cmd {
+	__le32 vdev_id;
+	__le32 state;
+	__le32 notification_interval_ms;
+	__le32 tx_discovery_threshold;
+	__le32 tx_teardown_threshold;
+	__le32 rssi_teardown_threshold;
+	__le32 rssi_delta;
+	__le32 tdls_options;
+	__le32 tdls_peer_traffic_ind_window;
+	__le32 tdls_peer_traffic_response_timeout_ms;
+	__le32 tdls_puapsd_mask;
+	__le32 tdls_puapsd_inactivity_time_ms;
+	__le32 tdls_puapsd_rx_frame_threshold;
+} __packed;
+
+struct wmi_tdls_peer_update_cmd {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 peer_state;
+} __packed;
+
+enum {
+	WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
+	WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
+	WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
+	WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
+#define WMI_TLV_TDLS_PEER_SP_MASK	0x60
+#define WMI_TLV_TDLS_PEER_SP_LSB	5
+
+struct wmi_tdls_peer_capab {
+	__le32 peer_qos;
+	__le32 buff_sta_support;
+	__le32 off_chan_support;
+	__le32 peer_curr_operclass;
+	__le32 self_curr_operclass;
+	__le32 peer_chan_len;
+	__le32 peer_operclass_len;
+	u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+	__le32 is_peer_responder;
+	__le32 pref_offchan_num;
+	__le32 pref_offchan_bw;
+} __packed;
+
+struct wmi_tlv_adaptive_qcs {
+	__le32 enable;
+} __packed;
+
+/**
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ *
+ * @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
+ *	Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
+ *	Only peer_id is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
+ *	vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
+ *	vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_HOST: The host itself requested a tx pause.
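+ *
+ * In the vdev_map cases, bit N of the map stands for vdev id N, so the
+ * host side typically walks it with for_each_set_bit() when applying the
+ * pause (a sketch of the convention, not a quote of the event handler).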
+ */ +enum wmi_tlv_tx_pause_id { + WMI_TLV_TX_PAUSE_ID_MCC = 1, + WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2, + WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3, + WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4, + WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5, + WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6, + WMI_TLV_TX_PAUSE_ID_AP_PS = 7, + WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8, + WMI_TLV_TX_PAUSE_ID_HOST = 21, +}; + +enum wmi_tlv_tx_pause_action { + WMI_TLV_TX_PAUSE_ACTION_STOP, + WMI_TLV_TX_PAUSE_ACTION_WAKE, +}; + +struct wmi_tlv_tx_pause_ev { + __le32 pause_id; + __le32 action; + __le32 vdev_map; + __le32 peer_id; + __le32 tid_map; +} __packed; + +struct wmi_tlv_tdls_peer_event { + struct wmi_mac_addr peer_macaddr; + __le32 peer_status; + __le32 peer_reason; + __le32 vdev_id; +} __packed; + +enum wmi_tlv_sys_cap_info_flags { + WMI_TLV_SYS_CAP_INFO_RXTX_LED = BIT(0), + WMI_TLV_SYS_CAP_INFO_RFKILL = BIT(1), +}; + +#define WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0) +#define WMI_TLV_RFKILL_CFG_RADIO_LEVEL BIT(6) +#define WMI_TLV_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7) + +enum wmi_tlv_rfkill_enable_radio { + WMI_TLV_RFKILL_ENABLE_RADIO_ON = 0, + WMI_TLV_RFKILL_ENABLE_RADIO_OFF = 1, +}; + +enum wmi_tlv_rfkill_radio_state { + WMI_TLV_RFKILL_RADIO_STATE_OFF = 1, + WMI_TLV_RFKILL_RADIO_STATE_ON = 2, +}; + +struct wmi_tlv_rfkill_state_change_ev { + __le32 gpio_pin_num; + __le32 int_type; + __le32 radio_state; +}; + +void ath10k_wmi_tlv_attach(struct ath10k *ar); + +enum wmi_nlo_auth_algorithm { + WMI_NLO_AUTH_ALGO_80211_OPEN = 1, + WMI_NLO_AUTH_ALGO_80211_SHARED_KEY = 2, + WMI_NLO_AUTH_ALGO_WPA = 3, + WMI_NLO_AUTH_ALGO_WPA_PSK = 4, + WMI_NLO_AUTH_ALGO_WPA_NONE = 5, + WMI_NLO_AUTH_ALGO_RSNA = 6, + WMI_NLO_AUTH_ALGO_RSNA_PSK = 7, +}; + +enum wmi_nlo_cipher_algorithm { + WMI_NLO_CIPHER_ALGO_NONE = 0x00, + WMI_NLO_CIPHER_ALGO_WEP40 = 0x01, + WMI_NLO_CIPHER_ALGO_TKIP = 0x02, + WMI_NLO_CIPHER_ALGO_CCMP = 0x04, + WMI_NLO_CIPHER_ALGO_WEP104 = 0x05, + WMI_NLO_CIPHER_ALGO_BIP = 0x06, + WMI_NLO_CIPHER_ALGO_RSN_USE_GROUP = 0x100, + WMI_NLO_CIPHER_ALGO_WEP = 0x101, +}; + +/* SSID broadcast type passed in NLO params */ +enum wmi_nlo_ssid_bcastnwtype { + WMI_NLO_BCAST_UNKNOWN = 0, + WMI_NLO_BCAST_NORMAL = 1, + WMI_NLO_BCAST_HIDDEN = 2, +}; + +#define WMI_NLO_MAX_SSIDS 16 +#define WMI_NLO_MAX_CHAN 48 + +#define WMI_NLO_CONFIG_STOP (0x1 << 0) +#define WMI_NLO_CONFIG_START (0x1 << 1) +#define WMI_NLO_CONFIG_RESET (0x1 << 2) +#define WMI_NLO_CONFIG_SLOW_SCAN (0x1 << 4) +#define WMI_NLO_CONFIG_FAST_SCAN (0x1 << 5) +#define WMI_NLO_CONFIG_SSID_HIDE_EN (0x1 << 6) + +/* This bit is used to indicate if EPNO or supplicant PNO is enabled. + * Only one of them can be enabled at a given time + */ +#define WMI_NLO_CONFIG_ENLO (0x1 << 7) +#define WMI_NLO_CONFIG_SCAN_PASSIVE (0x1 << 8) +#define WMI_NLO_CONFIG_ENLO_RESET (0x1 << 9) +#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ (0x1 << 10) +#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ (0x1 << 11) +#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ (0x1 << 12) +#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG (0x1 << 13) + +/* Whether directed scan needs to be performed (for hidden SSIDs) */ +#define WMI_ENLO_FLAG_DIRECTED_SCAN 1 + +/* Whether PNO event shall be triggered if the network is found on A band */ +#define WMI_ENLO_FLAG_A_BAND 2 + +/* Whether PNO event shall be triggered if the network is found on G band */ +#define WMI_ENLO_FLAG_G_BAND 4 + +/* Whether strict matching is required (i.e. 
the firmware shall match
+ * only on the entire SSID)
+ */
+#define WMI_ENLO_FLAG_STRICT_MATCH 8
+
+/* Code for matching the beacon AUTH IE - additional codes TBD */
+/* open */
+#define WMI_ENLO_AUTH_CODE_OPEN 1
+
+/* WPA_PSK or WPA2PSK */
+#define WMI_ENLO_AUTH_CODE_PSK 2
+
+/* any EAPOL */
+#define WMI_ENLO_AUTH_CODE_EAPOL 4
+
+struct wmi_nlo_ssid_param {
+	__le32 valid;
+	struct wmi_ssid ssid;
+} __packed;
+
+struct wmi_nlo_enc_param {
+	__le32 valid;
+	__le32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_param {
+	__le32 valid;
+	__le32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_param {
+	__le32 valid;
+
+	/* If WMI_NLO_CONFIG_ENLO is not set, supplicant PNO is enabled and
+	 * this value is interpreted as true/false. Otherwise EPNO is
+	 * enabled and bcast_nw_type is used as a bit flag containing
+	 * WMI_ENLO_FLAG_XXX values.
+	 */
+	__le32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_param {
+	__le32 valid;
+	__le32 rssi;
+} __packed;
+
+struct nlo_configured_parameters {
+	/* TLV tag and len; */
+	__le32 tlv_header;
+	struct wmi_nlo_ssid_param ssid;
+	struct wmi_nlo_enc_param enc_type;
+	struct wmi_nlo_auth_param auth_type;
+	struct wmi_nlo_rssi_param rssi_cond;
+
+	/* indicates if the SSID is hidden or not */
+	struct wmi_nlo_bcast_nw_param bcast_nw_type;
+} __packed;
+
+/* Support channel prediction for PNO scan after scanning top_k_num channels
+ * if stationary_threshold is met.
+ */
+struct nlo_channel_prediction_cfg {
+	__le32 tlv_header;
+
+	/* Enable or disable this feature. */
+	__le32 enable;
+
+	/* The top K channels will be scanned before deciding whether to
+	 * scan further or stop. Minimum value is 3 and maximum is 5.
+	 */
+	__le32 top_k_num;
+
+	/* Preconfigured stationary threshold. A lower value is more
+	 * conservative, a higher value more aggressive. Maximum is 100 and
+	 * minimum is 0.
+	 */
+	__le32 stationary_threshold;
+
+	/* Periodic full channel scan, in milliseconds. Once
+	 * full_scan_period_ms has elapsed since the last full scan, the
+	 * channel prediction scan is suppressed and a full scan is done
+	 * instead. This helps detect sudden AP power-on or power-off.
+	 * Value 0 means no full scan at all (not recommended).
+	 */
+	__le32 full_scan_period_ms;
+} __packed;
+
+struct enlo_candidate_score_params_t {
+	__le32 tlv_header; /* TLV tag and len; */
+
+	/* minimum 5GHz RSSI for a BSSID to be considered (units = dBm) */
+	__le32 min_5ghz_rssi;
+
+	/* minimum 2.4GHz RSSI for a BSSID to be considered (units = dBm) */
+	__le32 min_24ghz_rssi;
+
+	/* the maximum score that a network can have before bonuses */
+	__le32 initial_score_max;
+
+	/* current_connection_bonus:
+	 * only report a network when its score is at least this much higher
+	 * than the current connection's
+	 */
+	__le32 current_connection_bonus;
+
+	/* score bonus for all networks with the same network flag */
+	__le32 same_network_bonus;
+
+	/* score bonus for networks that are not open */
+	__le32 secure_bonus;
+
+	/* 5GHz RSSI score bonus (applied to all 5GHz networks) */
+	__le32 band_5ghz_bonus;
+} __packed;
+
+struct connected_nlo_bss_band_rssi_pref_t {
+	__le32 tlv_header; /* TLV tag and len; */
+
+	/* band which needs to get preference over the other band
+	 * - see wmi_set_vdev_ie_band enum
+	 */
+	__le32 band;
+
+	/* Amount of RSSI preference (in dB) that can be given to a band */
+	__le32 rssi_pref;
+} __packed;
+
+struct connected_nlo_rssi_params_t {
+	__le32 tlv_header; /* TLV tag and len; */
+
+	/* Relative RSSI threshold (in dB) by which a new BSS must have a
+	 * better RSSI than the currently connected BSS.
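+	 * For example, relative_rssi = 5 would only accept a candidate
+	 * that is at least 5 dB stronger (an illustrative value).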
+ */ + __le32 relative_rssi; + + /* The amount of rssi preference (in dB) that can be given + * to a 5G BSS over 2.4G BSS. + */ + __le32 relative_rssi_5g_pref; +} __packed; + +struct wmi_tlv_wow_nlo_config_cmd { + __le32 flags; + __le32 vdev_id; + __le32 fast_scan_max_cycles; + __le32 active_dwell_time; + __le32 passive_dwell_time; /* PDT in msecs */ + __le32 probe_bundle_size; + + /* ART = IRT */ + __le32 rest_time; + + /* Max value that can be reached after SBM */ + __le32 max_rest_time; + + /* SBM */ + __le32 scan_backoff_multiplier; + + /* SCBM */ + __le32 fast_scan_period; + + /* specific to windows */ + __le32 slow_scan_period; + + __le32 no_of_ssids; + + __le32 num_of_channels; + + /* NLO scan start delay time in milliseconds */ + __le32 delay_start_time; + + /** MAC Address to use in Probe Req as SA **/ + struct wmi_mac_addr mac_addr; + + /** Mask on which MAC has to be randomized **/ + struct wmi_mac_addr mac_mask; + + /** IE bitmap to use in Probe Req **/ + __le32 ie_bitmap[8]; + + /** Number of vendor OUIs. In the TLV vendor_oui[] **/ + __le32 num_vendor_oui; + + /** Number of connected NLO band preferences **/ + __le32 num_cnlo_band_pref; + + /* The TLVs will follow. + * nlo_configured_parameters nlo_list[]; + * A_UINT32 channel_list[num_of_channels]; + * nlo_channel_prediction_cfg ch_prediction_cfg; + * enlo_candidate_score_params candidate_score_params; + * wmi_vendor_oui vendor_oui[num_vendor_oui]; + * connected_nlo_rssi_params cnlo_rssi_params; + * connected_nlo_bss_band_rssi_pref cnlo_bss_band_rssi_pref[num_cnlo_band_pref]; + */ +} __packed; + +struct wmi_tlv_mgmt_tx_cmd { + __le32 vdev_id; + __le32 desc_id; + __le32 chanfreq; + __le64 paddr; + __le32 frame_len; + __le32 buf_len; +} __packed; +#endif diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 7d4b7987422d..b4aad6604d6d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1,73 +1,1821 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include <linux/skbuff.h> +#include <linux/ctype.h> #include "core.h" #include "htc.h" #include "debug.h" #include "wmi.h" +#include "wmi-tlv.h" #include "mac.h" - -void ath10k_wmi_flush_tx(struct ath10k *ar) +#include "testmode.h" +#include "wmi-ops.h" +#include "p2p.h" +#include "hw.h" +#include "hif.h" +#include "txrx.h" + +#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9 +#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ) +#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6) + +/* MAIN WMI cmd track */ +static struct wmi_cmd_map wmi_cmd_map = { + .init_cmdid = WMI_INIT_CMDID, + .start_scan_cmdid = WMI_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID, + .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID, + .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_ROAM_SCAN_PERIOD, + 
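+	/* Each per-ABI map translates the abstract command ids used by the
+	 * driver core into this firmware branch's numbering; a send site
+	 * then just does, e.g. (a sketch of the pattern, not a quote):
+	 *
+	 *	ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
+	 *
+	 * so the same code path works for main, 10.x, 10.2.4 and 10.4.
+	 */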
.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_OFL_SCAN_ADD_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid =
+				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
+	.network_list_offload_config_cmdid =
+				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
+	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
+	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
+	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
+	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
+	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
+	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
+	.echo_cmdid = WMI_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
+	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
+	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
+	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
+	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+
.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED, + .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, + .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, +}; + +/* 10.X WMI cmd track */ +static struct wmi_cmd_map wmi_10x_cmd_map = { + .init_cmdid = WMI_10X_INIT_CMDID, + .start_scan_cmdid = WMI_10X_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID, + .vdev_down_cmdid = 
WMI_10X_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = + WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE, + .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE, + .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED, + .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID, + .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED, + .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID, + .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = + WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID, + 
.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = + WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + .vdev_spectral_scan_enable_cmdid = + WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID, + .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, + .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, + .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED, + .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, + .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, + .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, + .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, + .echo_cmdid = WMI_10X_ECHO_CMDID, + .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED, + .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID, + .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED, + .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, + .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, + 
.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, +}; + +/* 10.2.4 WMI cmd track */ +static struct wmi_cmd_map wmi_10_2_4_cmd_map = { + .init_cmdid = WMI_10_2_INIT_CMDID, + .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = + WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE, + 
.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE, + .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED, + .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID, + .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED, + .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID, + .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = + WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID, + .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = + WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + .vdev_spectral_scan_enable_cmdid = + WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID, + .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, + .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, + .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED, + .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, + .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, + .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, + .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, + .echo_cmdid = WMI_10_2_ECHO_CMDID, + .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED, + .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, + .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID, + .pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, 
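+	/* Ids left as WMI_CMD_UNSUPPORTED mark commands this ABI lacks;
+	 * the send path can then bail out early, along the lines of
+	 * (an illustrative sketch of the check):
+	 *
+	 *	if (cmd_id == WMI_CMD_UNSUPPORTED)
+	 *		return -EOPNOTSUPP;
+	 */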
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED, + .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, + .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_bss_chan_info_request_cmdid = + WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, + .set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID, +}; + +/* 10.4 WMI cmd track */ +static struct wmi_cmd_map wmi_10_4_cmd_map = { + .init_cmdid = WMI_10_4_INIT_CMDID, + .start_scan_cmdid = WMI_10_4_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID, + .vdev_stop_cmdid = 
WMI_10_4_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID, + .bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID, + .addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = + WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE, + .ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE, + .p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID, + .ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID, + .ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID, + .peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID, + .wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = 
WMI_10_4_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = + WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID, + .rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = + WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + .vdev_spectral_scan_enable_cmdid = + WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID, + .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, + .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, + .gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID, + .csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID, + .csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID, + .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, + .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, + .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, + .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, + .echo_cmdid = WMI_10_4_ECHO_CMDID, + .pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID, + .vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID, + .force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID, + .gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID, + .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID, + .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED, + .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID, + .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID, + .vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID, + .wlan_peer_caching_add_peer_cmdid = + WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID, + .wlan_peer_caching_evict_peer_cmdid = + WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID, + .wlan_peer_caching_restore_peer_cmdid = + WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID, + .wlan_peer_caching_print_all_peers_info_cmdid = + WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID, + .peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID, + .peer_add_proxy_sta_entry_cmdid = + WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID, + .rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID, + .oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID, + .nan_cmdid = WMI_10_4_NAN_CMDID, + .vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID, + .qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID, + .pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID, + .pdev_smart_ant_set_rx_antenna_cmdid = + WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, + .peer_smart_ant_set_tx_antenna_cmdid = + WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID, + .peer_smart_ant_set_train_info_cmdid = + WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID, + .peer_smart_ant_set_node_config_ops_cmdid = + WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID, + .pdev_set_antenna_switch_table_cmdid = + WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, + .pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID, + .pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID, + .pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID, + .pdev_ratepwr_chainmsk_table_cmdid = + WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID, + .pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID, + .tt_set_conf_cmdid = 
WMI_10_4_TT_SET_CONF_CMDID, + .fwtest_cmdid = WMI_10_4_FWTEST_CMDID, + .vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID, + .peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID, + .pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID, + .pdev_get_ani_ofdm_config_cmdid = + WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID, + .pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID, + .pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID, + .pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID, + .pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID, + .vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID, + .vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID, + .vdev_filter_neighbor_rx_packets_cmdid = + WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID, + .mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID, + .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID, + .pdev_bss_chan_info_request_cmdid = + WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID, + .vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID, + .set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID, + .atf_ssid_grouping_request_cmdid = + WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID, + .peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID, + .set_periodic_channel_stats_cfg_cmdid = + WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG, + .peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID, + .btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID, + .peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID, + .peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID, + .peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID, + .pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID, + .coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID, + .pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID, + .pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID, + .vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID, + .prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID, + .config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID, + .debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID, + .get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID, + .pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID, + .vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID, + .pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID, + .tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID, + .tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID, + .tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID, + .radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID, + .per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID, +}; + +static struct wmi_peer_param_map wmi_peer_param_map = { + .smps_state = WMI_PEER_SMPS_STATE, + .ampdu = WMI_PEER_AMPDU, + .authorize = WMI_PEER_AUTHORIZE, + .chan_width = WMI_PEER_CHAN_WIDTH, + .nss = WMI_PEER_NSS, + .use_4addr = WMI_PEER_USE_4ADDR, + .use_fixed_power = WMI_PEER_USE_FIXED_PWR, + .debug = WMI_PEER_DEBUG, + .phymode = WMI_PEER_PHYMODE, + .dummy_var = WMI_PEER_DUMMY_VAR, +}; + +/* MAIN WMI VDEV param map */ +static struct wmi_vdev_param_map wmi_vdev_param_map = { + .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD, + .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + 
.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL, + .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL, + .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE, + .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE, + .slot_time = WMI_VDEV_PARAM_SLOT_TIME, + .preamble = WMI_VDEV_PARAM_PREAMBLE, + .swba_time = WMI_VDEV_PARAM_SWBA_TIME, + .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD, + .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME, + .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL, + .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD, + .wmi_vdev_oc_scheduler_air_time_limit = + WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + .wds = WMI_VDEV_PARAM_WDS, + .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW, + .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX, + .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT, + .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT, + .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM, + .chwidth = WMI_VDEV_PARAM_CHWIDTH, + .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET, + .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION, + .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT, + .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE, + .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE, + .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE, + .sgi = WMI_VDEV_PARAM_SGI, + .ldpc = WMI_VDEV_PARAM_LDPC, + .tx_stbc = WMI_VDEV_PARAM_TX_STBC, + .rx_stbc = WMI_VDEV_PARAM_RX_STBC, + .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD, + .def_keyid = WMI_VDEV_PARAM_DEF_KEYID, + .nss = WMI_VDEV_PARAM_NSS, + .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE, + .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE, + .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE, + .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE, + .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + .ap_keepalive_min_idle_inactive_time_secs = + WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_idle_inactive_time_secs = + WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_unresponsive_time_secs = + WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS, + .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED, + .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS, + .txbf = WMI_VDEV_PARAM_TXBF, + .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE, + .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY, + .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE, + .ap_detect_out_of_sync_sleeping_sta_time_secs = + WMI_VDEV_PARAM_UNSUPPORTED, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED, + .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED, +}; + +/* 10.X WMI VDEV param map */ +static struct wmi_vdev_param_map wmi_10x_vdev_param_map = { + .rts_threshold = 
WMI_10X_VDEV_PARAM_RTS_THRESHOLD, + .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL, + .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL, + .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE, + .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE, + .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME, + .preamble = WMI_10X_VDEV_PARAM_PREAMBLE, + .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME, + .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD, + .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME, + .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL, + .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD, + .wmi_vdev_oc_scheduler_air_time_limit = + WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + .wds = WMI_10X_VDEV_PARAM_WDS, + .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW, + .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX, + .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED, + .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED, + .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM, + .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH, + .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET, + .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION, + .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT, + .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE, + .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE, + .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE, + .sgi = WMI_10X_VDEV_PARAM_SGI, + .ldpc = WMI_10X_VDEV_PARAM_LDPC, + .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC, + .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC, + .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD, + .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID, + .nss = WMI_10X_VDEV_PARAM_NSS, + .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE, + .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE, + .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE, + .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE, + .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + .ap_keepalive_min_idle_inactive_time_secs = + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_idle_inactive_time_secs = + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_unresponsive_time_secs = + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS, + .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET, + .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS, + .txbf = WMI_VDEV_PARAM_UNSUPPORTED, + .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED, + .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED, + .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .ap_detect_out_of_sync_sleeping_sta_time_secs = + WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, 
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED, + .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED, +}; + +static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { + .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD, + .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL, + .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL, + .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE, + .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE, + .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME, + .preamble = WMI_10X_VDEV_PARAM_PREAMBLE, + .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME, + .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD, + .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME, + .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL, + .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD, + .wmi_vdev_oc_scheduler_air_time_limit = + WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + .wds = WMI_10X_VDEV_PARAM_WDS, + .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW, + .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX, + .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED, + .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED, + .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM, + .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH, + .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET, + .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION, + .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT, + .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE, + .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE, + .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE, + .sgi = WMI_10X_VDEV_PARAM_SGI, + .ldpc = WMI_10X_VDEV_PARAM_LDPC, + .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC, + .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC, + .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD, + .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID, + .nss = WMI_10X_VDEV_PARAM_NSS, + .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE, + .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE, + .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE, + .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE, + .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + .ap_keepalive_min_idle_inactive_time_secs = + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_idle_inactive_time_secs = + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_unresponsive_time_secs = + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS, + .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET, + .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS, + .txbf = WMI_VDEV_PARAM_UNSUPPORTED, + .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED, + .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED, + .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .ap_detect_out_of_sync_sleeping_sta_time_secs = + WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = 
WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED, + .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED, +}; + +static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { + .rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD, + .fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + .beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL, + .listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL, + .multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE, + .mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE, + .slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME, + .preamble = WMI_10_4_VDEV_PARAM_PREAMBLE, + .swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME, + .wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD, + .wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME, + .wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL, + .dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD, + .wmi_vdev_oc_scheduler_air_time_limit = + WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + .wds = WMI_10_4_VDEV_PARAM_WDS, + .atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW, + .bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX, + .bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT, + .bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT, + .feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM, + .chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH, + .chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET, + .disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION, + .sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT, + .mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE, + .protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE, + .fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE, + .sgi = WMI_10_4_VDEV_PARAM_SGI, + .ldpc = WMI_10_4_VDEV_PARAM_LDPC, + .tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC, + .rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC, + .intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD, + .def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID, + .nss = WMI_10_4_VDEV_PARAM_NSS, + .bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE, + .mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE, + .mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE, + .dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE, + .unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + .ap_keepalive_min_idle_inactive_time_secs = + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_idle_inactive_time_secs = + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_unresponsive_time_secs = + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + .ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS, + .mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET, + .enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS, + .txbf = WMI_10_4_VDEV_PARAM_TXBF, + .packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE, + .drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY, + .tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE, + .ap_detect_out_of_sync_sleeping_sta_time_secs = + WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + .rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES, + .cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR, + .mfptest_set = 
WMI_10_4_VDEV_PARAM_MFPTEST_SET, + .rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE, + .vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK, + .vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK, + .early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE, + .early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM, + .early_rx_bmiss_sample_cycle = + WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE, + .early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP, + .early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP, + .early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE, + .proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA, + .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC, + .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, + .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, + .inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT, + .dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT, + .disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN, + .rtt_responder_role = WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE, +}; + +static struct wmi_pdev_param_map wmi_pdev_param_map = { + .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK, + .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK, + .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G, + .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G, + .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE, + .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE, + .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE, + .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE, + .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW, + .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH, + .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH, + .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING, + .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE, + .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE, + .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK, + .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI, + .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO, + .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE, + .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE, + .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE, + .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + .pmf_qos = WMI_PDEV_PARAM_PMF_QOS, + .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE, + .dcs = WMI_PDEV_PARAM_DCS, + .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE, + .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD, + .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD, + .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL, + .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL, + .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN, + .proxy_sta = WMI_PDEV_PARAM_PROXY_STA, + .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG, + .power_gating_sleep = 
WMI_PDEV_PARAM_POWER_GATING_SLEEP, + .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED, + .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .cal_period = WMI_PDEV_PARAM_UNSUPPORTED, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, +}; + +static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { + .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK, + .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK, + .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G, + .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G, + .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE, + .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE, + .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE, + .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE, + .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW, + .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH, + .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH, + .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING, + .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE, + .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE, + .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK, + .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI, + .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO, + 
.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE, + .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE, + .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE, + .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED, + .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED, + .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED, + .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + .bcnflt_stats_update_period = + WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS, + .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE, + .dcs = WMI_10X_PDEV_PARAM_DCS, + .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE, + .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD, + .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD, + .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL, + .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL, + .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN, + .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED, + .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED, + .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED, + .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET, + .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR, + .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, + .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold 
= WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, +}; + +static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { + .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK, + .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK, + .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G, + .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G, + .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE, + .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE, + .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE, + .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE, + .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW, + .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH, + .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH, + .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING, + .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE, + .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE, + .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK, + .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI, + .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO, + .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE, + .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE, + .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE, + .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED, + .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED, + .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED, + .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + .bcnflt_stats_update_period = + WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS, + .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE, + .dcs = WMI_10X_PDEV_PARAM_DCS, + .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE, + .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD, + .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD, + .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL, + .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL, + .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN, + .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED, + .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED, + .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED, + .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET, + .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR, + .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, + .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, 
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = + WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, +}; + +/* firmware 10.2 specific mappings */ +static struct wmi_cmd_map wmi_10_2_cmd_map = { + .init_cmdid = WMI_10_2_INIT_CMDID, + .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID, + .peer_delete_cmdid = 
WMI_10_2_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = + WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE, + .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE, + .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED, + .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID, + .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED, + .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID, + .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = + WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID, + .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = + WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + .vdev_spectral_scan_enable_cmdid = + 
WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID, + .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, + .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, + .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED, + .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, + .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, + .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, + .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, + .echo_cmdid = WMI_10_2_ECHO_CMDID, + .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED, + .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, + .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, +}; + +static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { + .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK, + .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK, + .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G, + .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G, + .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE, + .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE, + .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE, + .resmgr_offchan_mode = 
WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE, + .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW, + .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH, + .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH, + .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING, + .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE, + .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE, + .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK, + .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI, + .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO, + .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE, + .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE, + .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE, + .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK, + .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + .pdev_stats_update_period = + WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + .vdev_stats_update_period = + WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + .peer_stats_update_period = + WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + .bcnflt_stats_update_period = + WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS, + .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE, + .dcs = WMI_10_4_PDEV_PARAM_DCS, + .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE, + .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD, + .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD, + .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL, + .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL, + .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN, + .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA, + .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG, + .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP, + .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET, + .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR, + .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE, + .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD, + .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST, + .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE, + .smart_antenna_default_antenna = + WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA, + .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE, + .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID, + .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN, + .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER, + .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID, + .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE, + .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE, + .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER, + .remove_mcast2ucast_buffer = + WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, + .peer_sta_ps_statechg_enable = + WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE, + .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE, + .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS, + .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID, + .set_msdu_ttl_cmdid = 
WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID, + .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID, + .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID, + .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID, + .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID, + .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS, + .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY, + .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION, + .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD, + .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE, + .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO, + .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH, + .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION, + .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN, + .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT, + .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL, + .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G, + .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G, + .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU, + .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU, + .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD, + .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE, + .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET, + .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, + .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR, + .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR, + .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX, +}; + +static const u8 wmi_key_cipher_suites[] = { + [WMI_CIPHER_NONE] = WMI_CIPHER_NONE, + [WMI_CIPHER_WEP] = WMI_CIPHER_WEP, + [WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP, + [WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB, + [WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM, + [WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI, + [WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP, + [WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC, + [WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM, +}; + +static const u8 wmi_tlv_key_cipher_suites[] = { + [WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE, + [WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP, + [WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP, + [WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB, + [WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM, + [WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI, + [WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP, + [WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC, + [WMI_CIPHER_AES_GCM] = WMI_TLV_CIPHER_AES_GCM, +}; + +static const struct wmi_peer_flags_map wmi_peer_flags_map = { + .auth = WMI_PEER_AUTH, + .qos = WMI_PEER_QOS, + .need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY, + .apsd = WMI_PEER_APSD, + .ht = WMI_PEER_HT, + .bw40 = WMI_PEER_40MHZ, + .stbc = WMI_PEER_STBC, + .ldbc = WMI_PEER_LDPC, + .dyn_mimops = WMI_PEER_DYN_MIMOPS, + .static_mimops = WMI_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_PEER_SPATIAL_MUX, + .vht = WMI_PEER_VHT, + .bw80 = WMI_PEER_80MHZ, + .vht_2g = WMI_PEER_VHT_2G, + .pmf = WMI_PEER_PMF, + .bw160 = WMI_PEER_160MHZ, +}; + +static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = { + .auth = WMI_10X_PEER_AUTH, + .qos = WMI_10X_PEER_QOS, + .need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY, + .apsd = WMI_10X_PEER_APSD, + .ht = WMI_10X_PEER_HT, + .bw40 = WMI_10X_PEER_40MHZ, + .stbc = WMI_10X_PEER_STBC, + .ldbc = WMI_10X_PEER_LDPC, + .dyn_mimops = WMI_10X_PEER_DYN_MIMOPS, + .static_mimops = WMI_10X_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_10X_PEER_SPATIAL_MUX, + 
.vht = WMI_10X_PEER_VHT, + .bw80 = WMI_10X_PEER_80MHZ, + .bw160 = WMI_10X_PEER_160MHZ, +}; + +static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = { + .auth = WMI_10_2_PEER_AUTH, + .qos = WMI_10_2_PEER_QOS, + .need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY, + .need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY, + .apsd = WMI_10_2_PEER_APSD, + .ht = WMI_10_2_PEER_HT, + .bw40 = WMI_10_2_PEER_40MHZ, + .stbc = WMI_10_2_PEER_STBC, + .ldbc = WMI_10_2_PEER_LDPC, + .dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS, + .static_mimops = WMI_10_2_PEER_STATIC_MIMOPS, + .spatial_mux = WMI_10_2_PEER_SPATIAL_MUX, + .vht = WMI_10_2_PEER_VHT, + .bw80 = WMI_10_2_PEER_80MHZ, + .vht_2g = WMI_10_2_PEER_VHT_2G, + .pmf = WMI_10_2_PEER_PMF, + .bw160 = WMI_10_2_PEER_160MHZ, +}; + +void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch, + const struct wmi_channel_arg *arg) { - int ret; + u32 flags = 0; + struct ieee80211_channel *chan = NULL; - ret = wait_event_timeout(ar->wmi.wq, - atomic_read(&ar->wmi.pending_tx_count) == 0, - 5*HZ); - if (atomic_read(&ar->wmi.pending_tx_count) == 0) - return; + memset(ch, 0, sizeof(*ch)); + + if (arg->passive) + flags |= WMI_CHAN_FLAG_PASSIVE; + if (arg->allow_ibss) + flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED; + if (arg->allow_ht) + flags |= WMI_CHAN_FLAG_ALLOW_HT; + if (arg->allow_vht) + flags |= WMI_CHAN_FLAG_ALLOW_VHT; + if (arg->ht40plus) + flags |= WMI_CHAN_FLAG_HT40_PLUS; + if (arg->chan_radar) + flags |= WMI_CHAN_FLAG_DFS; + + ch->band_center_freq2 = 0; + ch->mhz = __cpu_to_le32(arg->freq); + ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1); + if (arg->mode == MODE_11AC_VHT80_80) { + ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2); + chan = ieee80211_get_channel(ar->hw->wiphy, + arg->band_center_freq2 - 10); + } + + if (arg->mode == MODE_11AC_VHT160) { + u32 band_center_freq1; + u32 band_center_freq2; + + if (arg->freq > arg->band_center_freq1) { + band_center_freq1 = arg->band_center_freq1 + 40; + band_center_freq2 = arg->band_center_freq1 - 40; + } else { + band_center_freq1 = arg->band_center_freq1 - 40; + band_center_freq2 = arg->band_center_freq1 + 40; + } + + ch->band_center_freq1 = + __cpu_to_le32(band_center_freq1); + /* Minus 10 to get a defined 5G channel frequency */ + chan = ieee80211_get_channel(ar->hw->wiphy, + band_center_freq2 - 10); + /* The center frequency of the entire VHT160 */ + ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1); + } + + if (chan && chan->flags & IEEE80211_CHAN_RADAR) + flags |= WMI_CHAN_FLAG_DFS_CFREQ2; - if (ret == 0) - ret = -ETIMEDOUT; + ch->min_power = arg->min_power; + ch->max_power = arg->max_power; + ch->reg_power = arg->max_reg_power; + ch->antenna_max = arg->max_antenna_gain; + ch->max_tx_power = arg->max_power; - if (ret < 0) - ath10k_warn("wmi flush failed (%d)\n", ret); + /* mode & flags share storage */ + ch->mode = arg->mode; + ch->flags |= __cpu_to_le32(flags); } int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) { - int ret; - ret = wait_for_completion_timeout(&ar->wmi.service_ready, - WMI_SERVICE_READY_TIMEOUT_HZ); - return ret; + unsigned long time_left, i; + + time_left = wait_for_completion_timeout(&ar->wmi.service_ready, + WMI_SERVICE_READY_TIMEOUT_HZ); + if (!time_left) { + /* Sometimes the PCI HIF doesn't receive an interrupt + * for the service ready message even if the buffer + * was completed. A PCIe sniffer shows that this is + * because the corresponding CE ring doesn't fire + * it. Work around this by polling the CE rings once. 
+ */ + ath10k_warn(ar, "failed to receive service ready completion, polling..\n"); + + for (i = 0; i < CE_COUNT; i++) + ath10k_hif_send_complete_check(ar, i, 1); + + time_left = wait_for_completion_timeout(&ar->wmi.service_ready, + WMI_SERVICE_READY_TIMEOUT_HZ); + if (!time_left) { + ath10k_warn(ar, "polling timed out\n"); + return -ETIMEDOUT; + } + + ath10k_warn(ar, "service ready completion received, continuing normally\n"); + } + + return 0; } int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar) { - int ret; - ret = wait_for_completion_timeout(&ar->wmi.unified_ready, - WMI_UNIFIED_READY_TIMEOUT_HZ); - return ret; + unsigned long time_left; + + time_left = wait_for_completion_timeout(&ar->wmi.unified_ready, + WMI_UNIFIED_READY_TIMEOUT_HZ); + if (!time_left) + return -ETIMEDOUT; + return 0; } -static struct sk_buff *ath10k_wmi_alloc_skb(u32 len) +struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len) { struct sk_buff *skb; u32 round_len = roundup(len, 4); - skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len); + skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len); if (!skb) return NULL; skb_reserve(skb, WMI_SKB_HEADROOM); if (!IS_ALIGNED((unsigned long)skb->data, 4)) - ath10k_warn("Unaligned WMI skb\n"); + ath10k_warn(ar, "Unaligned WMI skb\n"); skb_put(skb, round_len); memset(skb->data, 0, round_len); @@ -78,18 +1826,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len) static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) { dev_kfree_skb(skb); - - if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0) - wake_up(&ar->wmi.wq); } -/* WMI command API */ -static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, - enum wmi_cmd_id cmd_id) +int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, + u32 cmd_id) { struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); struct wmi_cmd_hdr *cmd_hdr; - int status; + int ret; u32 cmd = 0; if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL) @@ -100,119 +1844,428 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, cmd_hdr = (struct wmi_cmd_hdr *)skb->data; cmd_hdr->cmd_id = __cpu_to_le32(cmd); - if (atomic_add_return(1, &ar->wmi.pending_tx_count) > - WMI_MAX_PENDING_TX_COUNT) { - /* avoid using up memory when FW hangs */ - atomic_dec(&ar->wmi.pending_tx_count); - return -EBUSY; + memset(skb_cb, 0, sizeof(*skb_cb)); + trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len); + ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb); + + if (ret) + goto err_pull; + + return 0; + +err_pull: + skb_pull(skb, sizeof(struct wmi_cmd_hdr)); + return ret; +} + +static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct ath10k_skb_cb *cb; + struct sk_buff *bcn; + bool dtim_zero; + bool deliver_cab; + int ret; + + spin_lock_bh(&ar->data_lock); + + bcn = arvif->beacon; + + if (!bcn) + goto unlock; + + cb = ATH10K_SKB_CB(bcn); + + switch (arvif->beacon_state) { + case ATH10K_BEACON_SENDING: + case ATH10K_BEACON_SENT: + break; + case ATH10K_BEACON_SCHEDULED: + arvif->beacon_state = ATH10K_BEACON_SENDING; + spin_unlock_bh(&ar->data_lock); + + dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO); + deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB); + ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar, + arvif->vdev_id, + bcn->data, bcn->len, + cb->paddr, + dtim_zero, + deliver_cab); + + spin_lock_bh(&ar->data_lock); + + if (ret == 0) + arvif->beacon_state = ATH10K_BEACON_SENT; + else + arvif->beacon_state = 
ATH10K_BEACON_SCHEDULED; } - memset(skb_cb, 0, sizeof(*skb_cb)); +unlock: + spin_unlock_bh(&ar->data_lock); +} + +static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath10k_vif *arvif = (void *)vif->drv_priv; - trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len); + ath10k_wmi_tx_beacon_nowait(arvif); +} - status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb); - if (status) { +static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar) +{ + ieee80211_iterate_active_interfaces_atomic(ar->hw, + ATH10K_ITER_NORMAL_FLAGS, + ath10k_wmi_tx_beacons_iter, + NULL); +} + +static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar) +{ + /* try to send pending beacons first. they take priority */ + ath10k_wmi_tx_beacons_nowait(ar); + + wake_up(&ar->wmi.tx_credits_wq); +} + +int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (cmd_id == WMI_CMD_UNSUPPORTED) { + ath10k_warn(ar, "wmi command %d is not supported by firmware\n", + cmd_id); dev_kfree_skb_any(skb); - atomic_dec(&ar->wmi.pending_tx_count); - return status; + return ret; } - return 0; + wait_event_timeout(ar->wmi.tx_credits_wq, ({ + if (ar->state == ATH10K_STATE_WEDGED) { + ret = -ESHUTDOWN; + ath10k_dbg(ar, ATH10K_DBG_WMI, + "drop wmi command %d, hardware is wedged\n", cmd_id); + } + /* try to send pending beacons first. they take priority */ + ath10k_wmi_tx_beacons_nowait(ar); + + ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id); + + if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + ret = -ESHUTDOWN; + + (ret != -EAGAIN); + }), 3 * HZ); + + if (ret) + dev_kfree_skb_any(skb); + + if (ret == -EAGAIN) { + ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n", + cmd_id); + ath10k_core_start_recovery(ar); + } + + return ret; } -static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) +static struct sk_buff * +ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) { - struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data; - enum wmi_scan_event_type event_type; - enum wmi_scan_completion_reason reason; - u32 freq; - u32 req_id; - u32 scan_id; + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct ath10k_vif *arvif; + struct wmi_mgmt_tx_cmd *cmd; + struct ieee80211_hdr *hdr; + struct sk_buff *skb; + int len; u32 vdev_id; + u32 buf_len = msdu->len; + u16 fc; + const u8 *peer_addr; + + hdr = (struct ieee80211_hdr *)msdu->data; + fc = le16_to_cpu(hdr->frame_control); - event_type = __le32_to_cpu(event->event_type); - reason = __le32_to_cpu(event->reason); - freq = __le32_to_cpu(event->channel_freq); - req_id = __le32_to_cpu(event->scan_req_id); - scan_id = __le32_to_cpu(event->scan_id); - vdev_id = __le32_to_cpu(event->vdev_id); + if (cb->vif) { + arvif = (void *)cb->vif->drv_priv; + vdev_id = arvif->vdev_id; + } else { + vdev_id = 0; + } - ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n"); - ath10k_dbg(ATH10K_DBG_WMI, - "scan event type %d reason %d freq %d req_id %d " - "scan_id %d vdev_id %d\n", - event_type, reason, freq, req_id, scan_id, vdev_id); + if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) + return ERR_PTR(-EINVAL); - spin_lock_bh(&ar->data_lock); + len = sizeof(cmd->hdr) + msdu->len; - switch (event_type) { - case WMI_SCAN_EVENT_STARTED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n"); - if (ar->scan.in_progress && ar->scan.is_roc) + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + 
ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + peer_addr = hdr->addr1; + if (is_multicast_ether_addr(peer_addr)) { + len += sizeof(struct ieee80211_mmie_16); + buf_len += sizeof(struct ieee80211_mmie_16); + } else { + if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP || + cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) { + len += IEEE80211_GCMP_MIC_LEN; + buf_len += IEEE80211_GCMP_MIC_LEN; + } else { + len += IEEE80211_CCMP_MIC_LEN; + buf_len += IEEE80211_CCMP_MIC_LEN; + } + } + } + + len = round_up(len, 4); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_mgmt_tx_cmd *)skb->data; + + cmd->hdr.vdev_id = __cpu_to_le32(vdev_id); + cmd->hdr.tx_rate = 0; + cmd->hdr.tx_power = 0; + cmd->hdr.buf_len = __cpu_to_le32(buf_len); + + ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr)); + memcpy(cmd->buf, msdu->data, msdu->len); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", + msdu, skb->len, fc & IEEE80211_FCTL_FTYPE, + fc & IEEE80211_FCTL_STYPE); + trace_ath10k_tx_hdr(ar, skb->data, skb->len); + trace_ath10k_tx_payload(ar, skb->data, skb->len); + + return skb; +} + +static void ath10k_wmi_event_scan_started(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_STARTING: + ar->scan.state = ATH10K_SCAN_RUNNING; + + if (ar->scan.is_roc) ieee80211_ready_on_channel(ar->hw); complete(&ar->scan.started); break; + } +} + +static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_STARTING: + complete(&ar->scan.started); + __ath10k_scan_finish(ar); + break; + } +} + +static void ath10k_wmi_event_scan_completed(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + /* One suspected reason scan can be completed while starting is + * if firmware fails to deliver all scan events to the host, + * e.g. when transport pipe is full. This has been observed + * with spectral scan phyerr events starving wmi transport + * pipe. In such case the "scan completed" event should be (and + * is) ignored by the host as it may be just firmware's scan + * state machine recovering. 
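+ * The handler below therefore only logs such an event and leaves the + * scan state machine untouched.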
+ */ + ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + __ath10k_scan_finish(ar); + break; + } +} + +static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ar->scan_channel = NULL; + break; + } +} + +static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); + + if (ar->scan.is_roc && ar->scan.roc_freq == freq) + complete(&ar->scan.on_channel); + break; + } +} + +static const char * +ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type, + enum wmi_scan_completion_reason reason) +{ + switch (type) { + case WMI_SCAN_EVENT_STARTED: + return "started"; case WMI_SCAN_EVENT_COMPLETED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n"); switch (reason) { case WMI_SCAN_REASON_COMPLETED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n"); - break; + return "completed"; case WMI_SCAN_REASON_CANCELLED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n"); - break; + return "completed [cancelled]"; case WMI_SCAN_REASON_PREEMPTED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n"); - break; + return "completed [preempted]"; case WMI_SCAN_REASON_TIMEDOUT: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n"); - break; - default: + return "completed [timedout]"; + case WMI_SCAN_REASON_INTERNAL_FAILURE: + return "completed [internal err]"; + case WMI_SCAN_REASON_MAX: break; } + return "completed [unknown]"; + case WMI_SCAN_EVENT_BSS_CHANNEL: + return "bss channel"; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL: + return "foreign channel"; + case WMI_SCAN_EVENT_DEQUEUED: + return "dequeued"; + case WMI_SCAN_EVENT_PREEMPTED: + return "preempted"; + case WMI_SCAN_EVENT_START_FAILED: + return "start failed"; + case WMI_SCAN_EVENT_RESTARTED: + return "restarted"; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT: + return "foreign channel exit"; + default: + return "unknown"; + } +} - ar->scan_channel = NULL; - if (!ar->scan.in_progress) { - ath10k_warn("no scan requested, ignoring\n"); - break; - } +static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_scan_ev_arg *arg) +{ + struct wmi_scan_event *ev = (void *)skb->data; - if (ar->scan.is_roc) { - ath10k_offchan_tx_purge(ar); + if (skb->len < sizeof(*ev)) + return -EPROTO; - if (!ar->scan.aborting) - ieee80211_remain_on_channel_expired(ar->hw); - } else { - ieee80211_scan_completed(ar->hw, ar->scan.aborting); - } + skb_pull(skb, sizeof(*ev)); + arg->event_type = ev->event_type; + arg->reason = ev->reason; + arg->channel_freq = ev->channel_freq; + arg->scan_req_id = ev->scan_req_id; + arg->scan_id = ev->scan_id; + arg->vdev_id = ev->vdev_id; - del_timer(&ar->scan.timeout); - 
complete_all(&ar->scan.completed); - ar->scan.in_progress = false; + return 0; +} + +int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_scan_ev_arg arg = {}; + enum wmi_scan_event_type event_type; + enum wmi_scan_completion_reason reason; + u32 freq; + u32 req_id; + u32 scan_id; + u32 vdev_id; + int ret; + + ret = ath10k_wmi_pull_scan(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse scan event: %d\n", ret); + return ret; + } + + event_type = __le32_to_cpu(arg.event_type); + reason = __le32_to_cpu(arg.reason); + freq = __le32_to_cpu(arg.channel_freq); + req_id = __le32_to_cpu(arg.scan_req_id); + scan_id = __le32_to_cpu(arg.scan_id); + vdev_id = __le32_to_cpu(arg.vdev_id); + + spin_lock_bh(&ar->data_lock); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", + ath10k_wmi_event_scan_type_str(event_type, reason), + event_type, reason, freq, req_id, scan_id, vdev_id, + ath10k_scan_state_str(ar->scan.state), ar->scan.state); + + switch (event_type) { + case WMI_SCAN_EVENT_STARTED: + ath10k_wmi_event_scan_started(ar); + break; + case WMI_SCAN_EVENT_COMPLETED: + ath10k_wmi_event_scan_completed(ar); break; case WMI_SCAN_EVENT_BSS_CHANNEL: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n"); - ar->scan_channel = NULL; + ath10k_wmi_event_scan_bss_chan(ar); break; case WMI_SCAN_EVENT_FOREIGN_CHANNEL: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n"); - ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); - if (ar->scan.in_progress && ar->scan.is_roc && - ar->scan.roc_freq == freq) { - complete(&ar->scan.on_channel); - } - break; - case WMI_SCAN_EVENT_DEQUEUED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n"); - break; - case WMI_SCAN_EVENT_PREEMPTED: - ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n"); + ath10k_wmi_event_scan_foreign_chan(ar, freq); break; case WMI_SCAN_EVENT_START_FAILED: - ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n"); + ath10k_warn(ar, "received scan start failure event\n"); + ath10k_wmi_event_scan_start_failed(ar); break; + case WMI_SCAN_EVENT_DEQUEUED: + case WMI_SCAN_EVENT_PREEMPTED: + case WMI_SCAN_EVENT_RESTARTED: + case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT: default: break; } @@ -221,219 +2274,1278 @@ static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) return 0; } -static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode) +/* If keys are configured, HW decrypts all frames + * with protected bit set. Mark such frames as decrypted. 
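+ * Only shared-key authentication frames for which a matching peer WEP + * key is installed get flagged below; all other frames are left + * untouched.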
+ */ +static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar, + struct sk_buff *skb, + struct ieee80211_rx_status *status) { - enum ieee80211_band band; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + unsigned int hdrlen; + bool peer_key; + u8 *addr, keyidx; - switch (phy_mode) { - case MODE_11A: - case MODE_11NA_HT20: - case MODE_11NA_HT40: - case MODE_11AC_VHT20: - case MODE_11AC_VHT40: - case MODE_11AC_VHT80: - band = IEEE80211_BAND_5GHZ; - break; - case MODE_11G: - case MODE_11B: - case MODE_11GONLY: - case MODE_11NG_HT20: - case MODE_11NG_HT40: - case MODE_11AC_VHT20_2G: - case MODE_11AC_VHT40_2G: - case MODE_11AC_VHT80_2G: - default: - band = IEEE80211_BAND_2GHZ; + if (!ieee80211_is_auth(hdr->frame_control) || + !ieee80211_has_protected(hdr->frame_control)) + return; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN)) + return; + + keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT; + addr = ieee80211_get_SA(hdr); + + spin_lock_bh(&ar->data_lock); + peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx); + spin_unlock_bh(&ar->data_lock); + + if (peer_key) { + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac wep key present for peer %pM\n", addr); + status->flag |= RX_FLAG_DECRYPTED; } +} - return band; +static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_mgmt_rx_ev_arg *arg) +{ + struct wmi_mgmt_rx_event_v1 *ev_v1; + struct wmi_mgmt_rx_event_v2 *ev_v2; + struct wmi_mgmt_rx_hdr_v1 *ev_hdr; + struct wmi_mgmt_rx_ext_info *ext_info; + size_t pull_len; + u32 msdu_len; + u32 len; + + if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, + ar->running_fw->fw_file.fw_features)) { + ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data; + ev_hdr = &ev_v2->hdr.v1; + pull_len = sizeof(*ev_v2); + } else { + ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data; + ev_hdr = &ev_v1->hdr; + pull_len = sizeof(*ev_v1); + } + + if (skb->len < pull_len) + return -EPROTO; + + skb_pull(skb, pull_len); + arg->channel = ev_hdr->channel; + arg->buf_len = ev_hdr->buf_len; + arg->status = ev_hdr->status; + arg->snr = ev_hdr->snr; + arg->phy_mode = ev_hdr->phy_mode; + arg->rate = ev_hdr->rate; + + msdu_len = __le32_to_cpu(arg->buf_len); + if (skb->len < msdu_len) + return -EPROTO; + + if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) { + len = ALIGN(le32_to_cpu(arg->buf_len), 4); + ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len); + memcpy(&arg->ext_info, ext_info, + sizeof(struct wmi_mgmt_rx_ext_info)); + } + /* the WMI buffer might've ended up being padded to 4 bytes due to HTC + * trailer with credit update. Trim the excess garbage. 
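+ * msdu_len was already validated against skb->len above, so the trim + * below can only shrink the buffer.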
+ */ + skb_trim(skb, msdu_len); + + return 0; } -static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band) +static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_mgmt_rx_ev_arg *arg) { - u8 rate_idx = 0; + struct wmi_10_4_mgmt_rx_event *ev; + struct wmi_10_4_mgmt_rx_hdr *ev_hdr; + size_t pull_len; + u32 msdu_len; + struct wmi_mgmt_rx_ext_info *ext_info; + u32 len; + + ev = (struct wmi_10_4_mgmt_rx_event *)skb->data; + ev_hdr = &ev->hdr; + pull_len = sizeof(*ev); + + if (skb->len < pull_len) + return -EPROTO; + + skb_pull(skb, pull_len); + arg->channel = ev_hdr->channel; + arg->buf_len = ev_hdr->buf_len; + arg->status = ev_hdr->status; + arg->snr = ev_hdr->snr; + arg->phy_mode = ev_hdr->phy_mode; + arg->rate = ev_hdr->rate; + + msdu_len = __le32_to_cpu(arg->buf_len); + if (skb->len < msdu_len) + return -EPROTO; + + if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) { + len = ALIGN(le32_to_cpu(arg->buf_len), 4); + ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len); + memcpy(&arg->ext_info, ext_info, + sizeof(struct wmi_mgmt_rx_ext_info)); + } - /* rate in Kbps */ - switch (rate) { - case 1000: - rate_idx = 0; - break; - case 2000: - rate_idx = 1; - break; - case 5500: - rate_idx = 2; - break; - case 11000: - rate_idx = 3; - break; - case 6000: - rate_idx = 4; - break; - case 9000: - rate_idx = 5; - break; - case 12000: - rate_idx = 6; - break; - case 18000: - rate_idx = 7; - break; - case 24000: - rate_idx = 8; - break; - case 36000: - rate_idx = 9; - break; - case 48000: - rate_idx = 10; - break; - case 54000: - rate_idx = 11; - break; - default: - break; + /* Make sure bytes added for padding are removed. */ + skb_trim(skb, msdu_len); + + return 0; +} + +static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar, + struct ieee80211_hdr *hdr) +{ + if (!ieee80211_has_protected(hdr->frame_control)) + return false; + + /* FW delivers WEP Shared Auth frame with Protected Bit set and + * encrypted payload. However in case of PMF it delivers decrypted + * frames with Protected Bit set. + */ + if (ieee80211_is_auth(hdr->frame_control)) + return false; + + /* qca99x0 based FW delivers broadcast or multicast management frames + * (ex: group privacy action frames in mesh) as encrypted payload. 
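+ * Returning false below leaves such frames unmarked so that they are + * decrypted in software further up the stack.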
+ */ + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) && + ar->hw_params.sw_decrypt_mcast_mgmt) + return false; + + return true; +} + +static int +wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param) +{ + struct ath10k_mgmt_tx_pkt_addr *pkt_addr; + struct ath10k_wmi *wmi = &ar->wmi; + struct ieee80211_tx_info *info; + struct sk_buff *msdu; + int ret; + + spin_lock_bh(&ar->data_lock); + + pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id); + if (!pkt_addr) { + ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n", + param->desc_id); + ret = -ENOENT; + goto out; } - if (band == IEEE80211_BAND_5GHZ) { - if (rate_idx > 3) - /* Omit CCK rates */ - rate_idx -= 4; - else - rate_idx = 0; + msdu = pkt_addr->vaddr; + dma_unmap_single(ar->dev, pkt_addr->paddr, + msdu->len, DMA_TO_DEVICE); + info = IEEE80211_SKB_CB(msdu); + kfree(pkt_addr); + + if (param->status) { + info->flags &= ~IEEE80211_TX_STAT_ACK; + } else { + info->flags |= IEEE80211_TX_STAT_ACK; + info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR + + param->ack_rssi; + info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; } - return rate_idx; + ieee80211_tx_status_irqsafe(ar->hw, msdu); + + ret = 0; + +out: + idr_remove(&wmi->mgmt_pending_tx, param->desc_id); + spin_unlock_bh(&ar->data_lock); + return ret; } -static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) +int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb) { - struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data; + struct wmi_tlv_mgmt_tx_compl_ev_arg arg; + struct mgmt_tx_compl_params param; + int ret; + + ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret); + return ret; + } + + memset(&param, 0, sizeof(struct mgmt_tx_compl_params)); + param.desc_id = __le32_to_cpu(arg.desc_id); + param.status = __le32_to_cpu(arg.status); + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + param.ack_rssi = __le32_to_cpu(arg.ack_rssi); + + wmi_process_mgmt_tx_comp(ar, &param); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n"); + + return 0; +} + +int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg; + struct mgmt_tx_compl_params param; + u32 num_reports; + int i, ret; + + ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret); + return ret; + } + + num_reports = __le32_to_cpu(arg.num_reports); + + for (i = 0; i < num_reports; i++) { + memset(&param, 0, sizeof(struct mgmt_tx_compl_params)); + param.desc_id = __le32_to_cpu(arg.desc_ids[i]); + param.status = __le32_to_cpu(arg.status[i]); + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]); + wmi_process_mgmt_tx_comp(ar, &param); + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n"); + + return 0; +} + +int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_mgmt_rx_ev_arg arg = {}; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr; + struct ieee80211_supported_band *sband; u32 rx_status; u32 channel; u32 phy_mode; - u32 snr; + u32 snr, rssi; u32 rate; - u32 buf_len; u16 fc; + int ret, i; - channel = __le32_to_cpu(event->hdr.channel); - buf_len = __le32_to_cpu(event->hdr.buf_len); - rx_status =
__le32_to_cpu(event->hdr.status); - snr = __le32_to_cpu(event->hdr.snr); - phy_mode = __le32_to_cpu(event->hdr.phy_mode); - rate = __le32_to_cpu(event->hdr.rate); + ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret); + dev_kfree_skb(skb); + return ret; + } + + channel = __le32_to_cpu(arg.channel); + rx_status = __le32_to_cpu(arg.status); + snr = __le32_to_cpu(arg.snr); + phy_mode = __le32_to_cpu(arg.phy_mode); + rate = __le32_to_cpu(arg.rate); memset(status, 0, sizeof(*status)); - ath10k_dbg(ATH10K_DBG_MGMT, + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx status %08x\n", rx_status); - if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) { + if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) || + (rx_status & (WMI_RX_STATUS_ERR_DECRYPT | + WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) { dev_kfree_skb(skb); return 0; } - if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) { + if (rx_status & WMI_RX_STATUS_ERR_MIC) + status->flag |= RX_FLAG_MMIC_ERROR; + + if (rx_status & WMI_RX_STATUS_EXT_INFO) { + status->mactime = + __le64_to_cpu(arg.ext_info.rx_mac_timestamp); + status->flag |= RX_FLAG_MACTIME_END; + } + /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to + * MODE_11B. This means phy_mode is not a reliable source for the band + * of mgmt rx. + */ + if (channel >= 1 && channel <= 14) { + status->band = NL80211_BAND_2GHZ; + } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) { + status->band = NL80211_BAND_5GHZ; + } else { + /* Shouldn't happen unless list of advertised channels to + * mac80211 has been changed. + */ + WARN_ON_ONCE(1); dev_kfree_skb(skb); return 0; } - if (rx_status & WMI_RX_STATUS_ERR_CRC) - status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (rx_status & WMI_RX_STATUS_ERR_MIC) - status->flag |= RX_FLAG_MMIC_ERROR; + if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ) + ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); + + sband = &ar->mac.sbands[status->band]; - status->band = phy_mode_to_band(phy_mode); status->freq = ieee80211_channel_to_frequency(channel, status->band); status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR; - status->rate_idx = get_rate_idx(rate, status->band); - skb_pull(skb, sizeof(event->hdr)); + BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi)); + + for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { + status->chains &= ~BIT(i); + rssi = __le32_to_cpu(arg.rssi[i]); + ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, arg.rssi[i]); + + if (rssi != ATH10K_INVALID_RSSI && rssi != 0) { + status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi; + status->chains |= BIT(i); + } + } + + status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100); hdr = (struct ieee80211_hdr *)skb->data; fc = le16_to_cpu(hdr->frame_control); - if (fc & IEEE80211_FCTL_PROTECTED) { - status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | - RX_FLAG_MMIC_STRIPPED; - hdr->frame_control = __cpu_to_le16(fc & + /* Firmware is guaranteed to report all essential management frames via + * WMI while it can deliver some extra via HTT. Since there can be + * duplicates split the reporting wrt monitor/sniffing. 
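+ * RX_FLAG_SKIP_MONITOR below keeps the WMI copy off monitor + * interfaces so the same frame is not reported twice.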
+ */ + status->flag |= RX_FLAG_SKIP_MONITOR; + + ath10k_wmi_handle_wep_reauth(ar, skb, status); + + if (ath10k_wmi_rx_is_decrypted(ar, hdr)) { + status->flag |= RX_FLAG_DECRYPTED; + + if (!ieee80211_is_action(hdr->frame_control) && + !ieee80211_is_deauth(hdr->frame_control) && + !ieee80211_is_disassoc(hdr->frame_control)) { + status->flag |= RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED; + hdr->frame_control = __cpu_to_le16(fc & ~IEEE80211_FCTL_PROTECTED); + } } - ath10k_dbg(ATH10K_DBG_MGMT, + if (ieee80211_is_beacon(hdr->frame_control)) + ath10k_mac_handle_beacon(ar, skb); + + if (ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) + status->boottime_ns = ktime_get_boottime_ns(); + + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx skb %p len %d ftype %02x stype %02x\n", skb, skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); - ath10k_dbg(ATH10K_DBG_MGMT, + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", status->freq, status->band, status->signal, status->rate_idx); - /* - * packets from HTC come aligned to 4byte boundaries - * because they can originally come in along with a trailer - */ - skb_trim(skb, buf_len); + ieee80211_rx_ni(ar->hw, skb); - ieee80211_rx(ar->hw, skb); return 0; } -static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) +static int freq_to_idx(struct ath10k *ar, int freq) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n"); + struct ieee80211_supported_band *sband; + int band, ch, idx = 0; + + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { + sband = ar->hw->wiphy->bands[band]; + if (!sband) + continue; + + for (ch = 0; ch < sband->n_channels; ch++, idx++) + if (sband->channels[ch].center_freq == freq) + goto exit; + } + +exit: + return idx; } -static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) +static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_ch_info_ev_arg *arg) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); + struct wmi_chan_info_event *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->err_code = ev->err_code; + arg->freq = ev->freq; + arg->cmd_flags = ev->cmd_flags; + arg->noise_floor = ev->noise_floor; + arg->rx_clear_count = ev->rx_clear_count; + arg->cycle_count = ev->cycle_count; + + return 0; } -static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) +static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_ch_info_ev_arg *arg) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n"); + struct wmi_10_4_chan_info_event *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->err_code = ev->err_code; + arg->freq = ev->freq; + arg->cmd_flags = ev->cmd_flags; + arg->noise_floor = ev->noise_floor; + arg->rx_clear_count = ev->rx_clear_count; + arg->cycle_count = ev->cycle_count; + arg->chan_tx_pwr_range = ev->chan_tx_pwr_range; + arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; + arg->rx_frame_count = ev->rx_frame_count; + + return 0; } -static void ath10k_wmi_event_update_stats(struct ath10k *ar, - struct sk_buff *skb) +/* + * Handle the channel info event for firmware which only sends one + * chan_info event per scanned channel. 
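+ * Such firmware advertises the + * ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL feature flag, which + * is checked in ath10k_wmi_event_chan_info() below.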
+ */ +static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar, + struct chan_info_params *params) { - struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data; + struct survey_info *survey; + int idx; + + if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n"); + return; + } + + idx = freq_to_idx(ar, params->freq); + if (idx >= ARRAY_SIZE(ar->survey)) { + ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n", + params->freq, idx); + return; + } + + survey = &ar->survey[idx]; + + if (!params->mac_clk_mhz) + return; - ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); + memset(survey, 0, sizeof(*survey)); - ath10k_debug_read_target_stats(ar, ev); + survey->noise = params->noise_floor; + survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000; + survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000; + survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY; } -static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, - struct sk_buff *skb) +/* + * Handle the channel info event for firmware which sends chan_info + * event in pairs(start and stop events) for every scanned channel. + */ +static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar, + struct chan_info_params *params) { - struct wmi_vdev_start_response_event *ev; + struct survey_info *survey; + int idx; - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); + idx = freq_to_idx(ar, params->freq); + if (idx >= ARRAY_SIZE(ar->survey)) { + ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n", + params->freq, idx); + return; + } - ev = (struct wmi_vdev_start_response_event *)skb->data; + if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) { + if (ar->ch_info_can_report_survey) { + survey = &ar->survey[idx]; + survey->noise = params->noise_floor; + survey->filled = SURVEY_INFO_NOISE_DBM; + + ath10k_hw_fill_survey_time(ar, + survey, + params->cycle_count, + params->rx_clear_count, + ar->survey_last_cycle_count, + ar->survey_last_rx_clear_count); + } + + ar->ch_info_can_report_survey = false; + } else { + ar->ch_info_can_report_survey = true; + } - if (WARN_ON(__le32_to_cpu(ev->status))) + if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) { + ar->survey_last_rx_clear_count = params->rx_clear_count; + ar->survey_last_cycle_count = params->cycle_count; + } +} + +void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) +{ + struct chan_info_params ch_info_param; + struct wmi_ch_info_ev_arg arg = {}; + int ret; + + ret = ath10k_wmi_pull_ch_info(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse chan info event: %d\n", ret); return; + } + + ch_info_param.err_code = __le32_to_cpu(arg.err_code); + ch_info_param.freq = __le32_to_cpu(arg.freq); + ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags); + ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor); + ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count); + ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count); + ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", + ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags, + ch_info_param.noise_floor, ch_info_param.rx_clear_count, + ch_info_param.cycle_count); + + spin_lock_bh(&ar->data_lock); + + switch (ar->scan.state) { + 
case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n"); + goto exit; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + break; + } + + if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL, + ar->running_fw->fw_file.fw_features)) + ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param); + else + ath10k_wmi_event_chan_info_paired(ar, &ch_info_param); + +exit: + spin_unlock_bh(&ar->data_lock); +} + +void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_echo_ev_arg arg = {}; + int ret; + + ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse echo: %d\n", ret); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event echo value 0x%08x\n", + le32_to_cpu(arg.value)); + + if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID) + complete(&ar->wmi.barrier); +} + +int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", + skb->len); + + trace_ath10k_wmi_dbglog(ar, skb->data, skb->len); + + return 0; +} + +void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src, + struct ath10k_fw_stats_pdev *dst) +{ + dst->ch_noise_floor = __le32_to_cpu(src->chan_nf); + dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); + dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); + dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count); + dst->cycle_count = __le32_to_cpu(src->cycle_count); + dst->phy_err_count = __le32_to_cpu(src->phy_err_count); + dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); +} + +void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src, + struct ath10k_fw_stats_pdev *dst) +{ + dst->comp_queued = __le32_to_cpu(src->comp_queued); + dst->comp_delivered = __le32_to_cpu(src->comp_delivered); + dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued); + dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued); + dst->wmm_drop = __le32_to_cpu(src->wmm_drop); + dst->local_enqued = __le32_to_cpu(src->local_enqued); + dst->local_freed = __le32_to_cpu(src->local_freed); + dst->hw_queued = __le32_to_cpu(src->hw_queued); + dst->hw_reaped = __le32_to_cpu(src->hw_reaped); + dst->underrun = __le32_to_cpu(src->underrun); + dst->tx_abort = __le32_to_cpu(src->tx_abort); + dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued); + dst->tx_ko = __le32_to_cpu(src->tx_ko); + dst->data_rc = __le32_to_cpu(src->data_rc); + dst->self_triggers = __le32_to_cpu(src->self_triggers); + dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure); + dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err); + dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry); + dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout); + dst->pdev_resets = __le32_to_cpu(src->pdev_resets); + dst->phy_underrun = __le32_to_cpu(src->phy_underrun); + dst->txop_ovf = __le32_to_cpu(src->txop_ovf); +} + +static void +ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src, + struct ath10k_fw_stats_pdev *dst) +{ + dst->comp_queued = __le32_to_cpu(src->comp_queued); + dst->comp_delivered = __le32_to_cpu(src->comp_delivered); + dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued); + dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued); + dst->wmm_drop = __le32_to_cpu(src->wmm_drop); + dst->local_enqued = __le32_to_cpu(src->local_enqued); + dst->local_freed = __le32_to_cpu(src->local_freed); + dst->hw_queued = 
__le32_to_cpu(src->hw_queued); + dst->hw_reaped = __le32_to_cpu(src->hw_reaped); + dst->underrun = __le32_to_cpu(src->underrun); + dst->tx_abort = __le32_to_cpu(src->tx_abort); + dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued); + dst->tx_ko = __le32_to_cpu(src->tx_ko); + dst->data_rc = __le32_to_cpu(src->data_rc); + dst->self_triggers = __le32_to_cpu(src->self_triggers); + dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure); + dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err); + dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry); + dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout); + dst->pdev_resets = __le32_to_cpu(src->pdev_resets); + dst->phy_underrun = __le32_to_cpu(src->phy_underrun); + dst->txop_ovf = __le32_to_cpu(src->txop_ovf); + dst->hw_paused = __le32_to_cpu(src->hw_paused); + dst->seq_posted = __le32_to_cpu(src->seq_posted); + dst->seq_failed_queueing = + __le32_to_cpu(src->seq_failed_queueing); + dst->seq_completed = __le32_to_cpu(src->seq_completed); + dst->seq_restarted = __le32_to_cpu(src->seq_restarted); + dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted); + dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush); + dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter); + dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated); + dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed); + dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired); +} + +void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src, + struct ath10k_fw_stats_pdev *dst) +{ + dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change); + dst->status_rcvd = __le32_to_cpu(src->status_rcvd); + dst->r0_frags = __le32_to_cpu(src->r0_frags); + dst->r1_frags = __le32_to_cpu(src->r1_frags); + dst->r2_frags = __le32_to_cpu(src->r2_frags); + dst->r3_frags = __le32_to_cpu(src->r3_frags); + dst->htt_msdus = __le32_to_cpu(src->htt_msdus); + dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus); + dst->loc_msdus = __le32_to_cpu(src->loc_msdus); + dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus); + dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu); + dst->phy_errs = __le32_to_cpu(src->phy_errs); + dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop); + dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs); +} + +void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src, + struct ath10k_fw_stats_pdev *dst) +{ + dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad); + dst->rts_bad = __le32_to_cpu(src->rts_bad); + dst->rts_good = __le32_to_cpu(src->rts_good); + dst->fcs_bad = __le32_to_cpu(src->fcs_bad); + dst->no_beacons = __le32_to_cpu(src->no_beacons); + dst->mib_int_count = __le32_to_cpu(src->mib_int_count); +} + +void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, + struct ath10k_fw_stats_peer *dst) +{ + ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); + dst->peer_rssi = __le32_to_cpu(src->peer_rssi); + dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); +} + +static void +ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src, + struct ath10k_fw_stats_peer *dst) +{ + ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); + dst->peer_rssi = __le32_to_cpu(src->peer_rssi); + dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); + dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); +} + +static void +ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src, + struct ath10k_fw_stats_vdev_extd *dst)
+{ + dst->vdev_id = __le32_to_cpu(src->vdev_id); + dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt); + dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack); + dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued); + dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt); + dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued); + dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry); + dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry); + dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry); + dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc); + dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry); + dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail); + dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt); + dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt); + dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt); + dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt); +} + +static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar, + struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + const struct wmi_stats_event *ev = (void *)skb->data; + u32 num_pdev_stats, num_peer_stats; + int i; + + if (!skb_pull(skb, sizeof(*ev))) + return -EPROTO; + + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats_base(&src->base, dst); + ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); + ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); + + list_add_tail(&dst->list, &stats->pdevs); + } + + /* fw doesn't implement vdev stats */ + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_peer_stats(src, dst); + list_add_tail(&dst->list, &stats->peers); + } + + return 0; +} + +static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar, + struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + const struct wmi_stats_event *ev = (void *)skb->data; + u32 num_pdev_stats, num_peer_stats; + int i; + + if (!skb_pull(skb, sizeof(*ev))) + return -EPROTO; + + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_10x_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats_base(&src->base, dst); + ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); + ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); + ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); + + list_add_tail(&dst->list, &stats->pdevs); + } + + /* fw doesn't implement vdev stats */ + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10x_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_peer_stats(&src->old, dst); + + dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); + + 
list_add_tail(&dst->list, &stats->peers); + } + + return 0; +} + +static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar, + struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + const struct wmi_10_2_stats_event *ev = (void *)skb->data; + u32 num_pdev_stats; + u32 num_pdev_ext_stats; + u32 num_peer_stats; + int i; + + if (!skb_pull(skb, sizeof(*ev))) + return -EPROTO; + + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); + num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_10_2_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats_base(&src->base, dst); + ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); + ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); + ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); + /* FIXME: expose 10.2 specific values */ + + list_add_tail(&dst->list, &stats->pdevs); + } + + for (i = 0; i < num_pdev_ext_stats; i++) { + const struct wmi_10_2_pdev_ext_stats *src; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + /* FIXME: expose values to userspace + * + * Note: Even though this loop seems to do nothing it is + * required to parse following sub-structures properly. + */ + } + + /* fw doesn't implement vdev stats */ + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10_2_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_peer_stats(&src->old, dst); + + dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); + /* FIXME: expose 10.2 specific values */ + + list_add_tail(&dst->list, &stats->peers); + } + + return 0; +} + +static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, + struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + const struct wmi_10_2_stats_event *ev = (void *)skb->data; + u32 num_pdev_stats; + u32 num_pdev_ext_stats; + u32 num_peer_stats; + int i; + + if (!skb_pull(skb, sizeof(*ev))) + return -EPROTO; + + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); + num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_10_2_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats_base(&src->base, dst); + ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); + ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); + ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); + /* FIXME: expose 10.2 specific values */ + + list_add_tail(&dst->list, &stats->pdevs); + } + + for (i = 0; i < num_pdev_ext_stats; i++) { + const struct wmi_10_2_pdev_ext_stats *src; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + /* FIXME: expose values to userspace + * + * Note: Even though this loop seems to do nothing it is + * required to parse following sub-structures properly. 
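+ * Each skb_pull() here advances the parse position so that the peer + * stats which follow are read from the correct offset.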
+ */ + } + + /* fw doesn't implement vdev stats */ + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10_2_4_ext_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + int stats_len; + + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) + stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats); + else + stats_len = sizeof(struct wmi_10_2_4_peer_stats); + + src = (void *)skb->data; + if (!skb_pull(skb, stats_len)) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_peer_stats(&src->common.old, dst); + + dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate); + + if (ath10k_peer_stats_enabled(ar)) + dst->rx_duration = __le32_to_cpu(src->rx_duration); + /* FIXME: expose 10.2 specific values */ + + list_add_tail(&dst->list, &stats->peers); + } + + return 0; +} + +static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, + struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + const struct wmi_10_2_stats_event *ev = (void *)skb->data; + u32 num_pdev_stats; + u32 num_pdev_ext_stats; + u32 num_vdev_stats; + u32 num_peer_stats; + u32 num_bcnflt_stats; + u32 stats_id; + int i; + + if (!skb_pull(skb, sizeof(*ev))) + return -EPROTO; + + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); + num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); + num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats); + stats_id = __le32_to_cpu(ev->stats_id); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_10_4_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats_base(&src->base, dst); + ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst); + ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); + dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs); + ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); + + list_add_tail(&dst->list, &stats->pdevs); + } + + for (i = 0; i < num_pdev_ext_stats; i++) { + const struct wmi_10_2_pdev_ext_stats *src; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + /* FIXME: expose values to userspace + * + * Note: Even though this loop seems to do nothing it is + * required to parse following sub-structures properly. + */ + } + + for (i = 0; i < num_vdev_stats; i++) { + const struct wmi_vdev_stats *src; + + /* Ignore vdev stats here as it has only vdev id. Actual vdev + * stats will be retrieved from vdev extended stats. + */ + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + } + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10_4_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_10_4_pull_peer_stats(src, dst); + list_add_tail(&dst->list, &stats->peers); + } + + for (i = 0; i < num_bcnflt_stats; i++) { + const struct wmi_10_4_bss_bcn_filter_stats *src; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + /* FIXME: expose values to userspace + * + * Note: Even though this loop seems to do nothing it is + * required to parse following sub-structures properly. 
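+ * Here it steps past the beacon filter records so that the extended + * peer and vdev stats below are read from the correct offset.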
+ */ + } + + if (stats_id & WMI_10_4_STAT_PEER_EXTD) { + stats->extended = true; + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10_4_peer_extd_stats *src; + struct ath10k_fw_extd_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ether_addr_copy(dst->peer_macaddr, + src->peer_macaddr.addr); + dst->rx_duration = __le32_to_cpu(src->rx_duration); + list_add_tail(&dst->list, &stats->peers_extd); + } + } + + if (stats_id & WMI_10_4_STAT_VDEV_EXTD) { + for (i = 0; i < num_vdev_stats; i++) { + const struct wmi_vdev_stats_extd *src; + struct ath10k_fw_stats_vdev_extd *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + ath10k_wmi_10_4_pull_vdev_stats(src, dst); + list_add_tail(&dst->list, &stats->vdevs); + } + } + + return 0; +} + +void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); + ath10k_debug_fw_stats_process(ar, skb); +} + +static int +ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_vdev_start_ev_arg *arg) +{ + struct wmi_vdev_start_response_event *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->vdev_id = ev->vdev_id; + arg->req_id = ev->req_id; + arg->resp_type = ev->resp_type; + arg->status = ev->status; + + return 0; +} + +void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_vdev_start_ev_arg arg = {}; + int ret; + u32 status; + + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); + + ar->last_wmi_vdev_start_status = 0; + + ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret); + ar->last_wmi_vdev_start_status = ret; + goto out; + } + + status = __le32_to_cpu(arg.status); + if (WARN_ON_ONCE(status)) { + ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n", + status, (status == WMI_VDEV_START_CHAN_INVALID) ? + "chan-invalid" : "unknown"); + /* Setup is done one way or another though, so we should still + * do the completion, so don't return here. 
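+ * last_wmi_vdev_start_status carries the error back to the waiter on + * vdev_setup_done.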
+ */ + ar->last_wmi_vdev_start_status = -EINVAL; + } + +out: complete(&ar->vdev_setup_done); } -static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); complete(&ar->vdev_setup_done); } -static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, - struct sk_buff *skb) +static int +ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_peer_kick_ev_arg *arg) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n"); + struct wmi_peer_sta_kickout_event *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->mac_addr = ev->peer_macaddr.addr; + + return 0; +} + +void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_peer_kick_ev_arg arg = {}; + struct ieee80211_sta *sta; + int ret; + + ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse peer kickout event: %d\n", + ret); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n", + arg.mac_addr); + + rcu_read_lock(); + + sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL); + if (!sta) { + ath10k_warn(ar, "Spurious quick kickout for STA %pM\n", + arg.mac_addr); + goto exit; + } + + ieee80211_report_low_ack(sta, 10); + +exit: + rcu_read_unlock(); } /* @@ -463,31 +3575,43 @@ static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, static void ath10k_wmi_update_tim(struct ath10k *ar, struct ath10k_vif *arvif, struct sk_buff *bcn, - struct wmi_bcn_info *bcn_info) + const struct wmi_tim_info_arg *tim_info) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data; struct ieee80211_tim_ie *tim; u8 *ies, *ie; u8 ie_len, pvm_len; + __le32 t; + u32 v, tim_len; + + /* When FW reports 0 in tim_len, ensure at least first byte + * in tim_bitmap is considered for pvm calculation. + */ + tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1; /* if next SWBA has no tim_changed the tim_bitmap is garbage. - * we must copy the bitmap upon change and reuse it later */ - if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) { + * we must copy the bitmap upon change and reuse it later + */ + if (__le32_to_cpu(tim_info->tim_changed)) { int i; - BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) != - sizeof(bcn_info->tim_info.tim_bitmap)); + if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) { + ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu", + tim_len, sizeof(arvif->u.ap.tim_bitmap)); + tim_len = sizeof(arvif->u.ap.tim_bitmap); + } - for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) { - __le32 t = bcn_info->tim_info.tim_bitmap[i / 4]; - u32 v = __le32_to_cpu(t); + for (i = 0; i < tim_len; i++) { + t = tim_info->tim_bitmap[i / 4]; + v = __le32_to_cpu(t); arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF; } - /* FW reports either length 0 or 16 - * so we calculate this on our own */ + /* FW reports either length 0 or length based on max supported + * station. 
so we calculate this on our own + */ arvif->u.ap.tim_len = 0; - for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) + for (i = 0; i < tim_len; i++) if (arvif->u.ap.tim_bitmap[i]) arvif->u.ap.tim_len = i; @@ -501,8 +3625,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies, (u8 *)skb_tail_pointer(bcn) - ies); if (!ie) { - /* highly unlikely for mac80211 */ - ath10k_warn("no tim ie found;\n"); + if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS) + ath10k_warn(ar, "no tim ie found;\n"); return; } @@ -511,7 +3635,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */ if (pvm_len < arvif->u.ap.tim_len) { - int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len; + int expand_size = tim_len - pvm_len; int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len); void *next_ie = ie + 2 + ie_len; @@ -522,151 +3646,221 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, ie_len += expand_size; pvm_len += expand_size; } else { - ath10k_warn("tim expansion failed\n"); + ath10k_warn(ar, "tim expansion failed\n"); } } - if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) { - ath10k_warn("tim pvm length is too great (%d)\n", pvm_len); + if (pvm_len > tim_len) { + ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len); return; } - tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast); + tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast); memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len); - ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", + if (tim->dtim_count == 0) { + ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO; + + if (__le32_to_cpu(tim_info->tim_mcast) == 1) + ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB; + } + + ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", tim->dtim_count, tim->dtim_period, tim->bitmap_ctrl, pvm_len); } -static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len, - struct wmi_p2p_noa_info *noa) +static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, + struct sk_buff *bcn, + const struct wmi_p2p_noa_info *noa) { - struct ieee80211_p2p_noa_attr *noa_attr; - u8 ctwindow_oppps = noa->ctwindow_oppps; - u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET; - bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT); - __le16 *noa_attr_len; - u16 attr_len; - u8 noa_descriptors = noa->num_descriptors; - int i; + if (!arvif->vif->p2p) + return; + + ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); + + if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) + ath10k_p2p_noa_update(arvif, noa); + + if (arvif->u.ap.noa_data) + if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC)) + skb_put_data(bcn, arvif->u.ap.noa_data, + arvif->u.ap.noa_len); +} - /* P2P IE */ - data[0] = WLAN_EID_VENDOR_SPECIFIC; - data[1] = len - 2; - data[2] = (WLAN_OUI_WFA >> 16) & 0xff; - data[3] = (WLAN_OUI_WFA >> 8) & 0xff; - data[4] = (WLAN_OUI_WFA >> 0) & 0xff; - data[5] = WLAN_OUI_TYPE_WFA_P2P; +static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_swba_ev_arg *arg) +{ + struct wmi_host_swba_event *ev = (void *)skb->data; + u32 map; + size_t i; - /* NOA ATTR */ - data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE; - noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */ - noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9]; + if (skb->len < sizeof(*ev)) + return -EPROTO; - noa_attr->index = noa->index; - noa_attr->oppps_ctwindow = ctwindow; - if (oppps) 
- noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT; + skb_pull(skb, sizeof(*ev)); + arg->vdev_map = ev->vdev_map; + + for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { + if (!(map & BIT(0))) + continue; + + /* If this happens there were some changes in firmware and + * ath10k should update the max size of tim_info array. + */ + if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) + break; + + if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > + sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } - for (i = 0; i < noa_descriptors; i++) { - noa_attr->desc[i].count = - __le32_to_cpu(noa->descriptors[i].type_count); - noa_attr->desc[i].duration = noa->descriptors[i].duration; - noa_attr->desc[i].interval = noa->descriptors[i].interval; - noa_attr->desc[i].start_time = noa->descriptors[i].start_time; + arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len; + arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; + arg->tim_info[i].tim_bitmap = + ev->bcn_info[i].tim_info.tim_bitmap; + arg->tim_info[i].tim_changed = + ev->bcn_info[i].tim_info.tim_changed; + arg->tim_info[i].tim_num_ps_pending = + ev->bcn_info[i].tim_info.tim_num_ps_pending; + + arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info; + i++; } - attr_len = 2; /* index + oppps_ctwindow */ - attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc); - *noa_attr_len = __cpu_to_le16(attr_len); + return 0; } -static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa) +static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_swba_ev_arg *arg) { - u32 len = 0; - u8 noa_descriptors = noa->num_descriptors; - u8 opp_ps_info = noa->ctwindow_oppps; - bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT); + struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data; + u32 map; + size_t i; + if (skb->len < sizeof(*ev)) + return -EPROTO; - if (!noa_descriptors && !opps_enabled) - return len; + skb_pull(skb, sizeof(*ev)); + arg->vdev_map = ev->vdev_map; - len += 1 + 1 + 4; /* EID + len + OUI */ - len += 1 + 2; /* noa attr + attr len */ - len += 1 + 1; /* index + oppps_ctwindow */ - len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc); + for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { + if (!(map & BIT(0))) + continue; - return len; + /* If this happens there were some changes in firmware and + * ath10k should update the max size of tim_info array. 
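+ * The WARN_ON_ONCE below reports such a firmware change exactly once + * instead of flooding the log.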
+ */ + if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) + break; + + if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > + sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } + + arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len; + arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; + arg->tim_info[i].tim_bitmap = + ev->bcn_info[i].tim_info.tim_bitmap; + arg->tim_info[i].tim_changed = + ev->bcn_info[i].tim_info.tim_changed; + arg->tim_info[i].tim_num_ps_pending = + ev->bcn_info[i].tim_info.tim_num_ps_pending; + i++; + } + + return 0; } -static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, - struct sk_buff *bcn, - struct wmi_bcn_info *bcn_info) +static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_swba_ev_arg *arg) { - struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info; - u8 *new_data, *old_data = arvif->u.ap.noa_data; - u32 new_len; + struct wmi_10_4_host_swba_event *ev = (void *)skb->data; + u32 map, tim_len; + size_t i; - if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) - return; + if (skb->len < sizeof(*ev)) + return -EPROTO; - ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); - if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) { - new_len = ath10k_p2p_calc_noa_ie_len(noa); - if (!new_len) - goto cleanup; + skb_pull(skb, sizeof(*ev)); + arg->vdev_map = ev->vdev_map; - new_data = kmalloc(new_len, GFP_ATOMIC); - if (!new_data) - goto cleanup; + for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { + if (!(map & BIT(0))) + continue; - ath10k_p2p_fill_noa_ie(new_data, new_len, noa); + /* If this happens there were some changes in firmware and + * ath10k should update the max size of tim_info array. + */ + if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) + break; - spin_lock_bh(&ar->data_lock); - arvif->u.ap.noa_data = new_data; - arvif->u.ap.noa_len = new_len; - spin_unlock_bh(&ar->data_lock); - kfree(old_data); - } + if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > + sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } - if (arvif->u.ap.noa_data) - if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC)) - memcpy(skb_put(bcn, arvif->u.ap.noa_len), - arvif->u.ap.noa_data, - arvif->u.ap.noa_len); - return; + tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len); + if (tim_len) { + /* Exclude 4 byte guard length */ + tim_len -= 4; + arg->tim_info[i].tim_len = __cpu_to_le32(tim_len); + } else { + arg->tim_info[i].tim_len = 0; + } -cleanup: - spin_lock_bh(&ar->data_lock); - arvif->u.ap.noa_data = NULL; - arvif->u.ap.noa_len = 0; - spin_unlock_bh(&ar->data_lock); - kfree(old_data); + arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; + arg->tim_info[i].tim_bitmap = + ev->bcn_info[i].tim_info.tim_bitmap; + arg->tim_info[i].tim_changed = + ev->bcn_info[i].tim_info.tim_changed; + arg->tim_info[i].tim_num_ps_pending = + ev->bcn_info[i].tim_info.tim_num_ps_pending; + + /* 10.4 firmware doesn't have p2p support. notice of absence + * info can be ignored for now. 
+ */
+
+ i++;
+ }
+
+ return 0;
+}
+
+static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
+{
+ return WMI_TXBF_CONF_BEFORE_ASSOC;
+}
+
-static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
 {
- struct wmi_host_swba_event *ev;
+ struct wmi_swba_ev_arg arg = {};
 u32 map;
 int i = -1;
- struct wmi_bcn_info *bcn_info;
+ const struct wmi_tim_info_arg *tim_info;
+ const struct wmi_p2p_noa_info *noa_info;
 struct ath10k_vif *arvif;
- struct wmi_bcn_tx_arg arg;
 struct sk_buff *bcn;
- int vdev_id = 0;
- int ret;
+ dma_addr_t paddr;
+ int ret, vdev_id = 0;

- ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
+ ret = ath10k_wmi_pull_swba(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
+ return;
+ }

- ev = (struct wmi_host_swba_event *)skb->data;
- map = __le32_to_cpu(ev->vdev_map);
+ map = __le32_to_cpu(arg.vdev_map);

- ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
- "-vdev map 0x%x\n",
- ev->vdev_map);
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
+ map);

 for (; map; map >>= 1, vdev_id++) {
 if (!(map & 0x1))
@@ -675,198 +3869,1727 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
 i++;

 if (i >= WMI_MAX_AP_VDEV) {
- ath10k_warn("swba has corrupted vdev map\n");
+ ath10k_warn(ar, "swba has corrupted vdev map\n");
 break;
 }

- bcn_info = &ev->bcn_info[i];
+ tim_info = &arg.tim_info[i];
+ noa_info = arg.noa_info[i];

- ath10k_dbg(ATH10K_DBG_MGMT,
- "-bcn_info[%d]:\n"
- "--tim_len %d\n"
- "--tim_mcast %d\n"
- "--tim_changed %d\n"
- "--tim_num_ps_pending %d\n"
- "--tim_bitmap 0x%08x%08x%08x%08x\n",
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
 i,
- __le32_to_cpu(bcn_info->tim_info.tim_len),
- __le32_to_cpu(bcn_info->tim_info.tim_mcast),
- __le32_to_cpu(bcn_info->tim_info.tim_changed),
- __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
+ __le32_to_cpu(tim_info->tim_len),
+ __le32_to_cpu(tim_info->tim_mcast),
+ __le32_to_cpu(tim_info->tim_changed),
+ __le32_to_cpu(tim_info->tim_num_ps_pending),
+ __le32_to_cpu(tim_info->tim_bitmap[3]),
+ __le32_to_cpu(tim_info->tim_bitmap[2]),
+ __le32_to_cpu(tim_info->tim_bitmap[1]),
+ __le32_to_cpu(tim_info->tim_bitmap[0]));
+
+ /* TODO: Only the first 4 words from tim_bitmap are dumped.
+ * Extend debug code to dump full tim_bitmap.
+ */ arvif = ath10k_get_arvif(ar, vdev_id); if (arvif == NULL) { - ath10k_warn("no vif for vdev_id %d found\n", vdev_id); + ath10k_warn(ar, "no vif for vdev_id %d found\n", + vdev_id); continue; } - bcn = ieee80211_beacon_get(ar->hw, arvif->vif); + /* mac80211 would have already asked us to stop beaconing and + * bring the vdev down, so continue in that case + */ + if (!arvif->is_up) + continue; + + /* There are no completions for beacons so wait for next SWBA + * before telling mac80211 to decrement CSA counter + * + * Once CSA counter is completed stop sending beacons until + * actual channel switch is done + */ + if (arvif->vif->bss_conf.csa_active && + ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) { + ieee80211_csa_finish(arvif->vif, 0); + continue; + } + + bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0); if (!bcn) { - ath10k_warn("could not get mac80211 beacon\n"); + ath10k_warn(ar, "could not get mac80211 beacon\n"); continue; } - ath10k_tx_h_seq_no(bcn); - ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info); - ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info); + ath10k_tx_h_seq_no(arvif->vif, bcn); + ath10k_wmi_update_tim(ar, arvif, bcn, tim_info); + ath10k_wmi_update_noa(ar, arvif, bcn, noa_info); - arg.vdev_id = arvif->vdev_id; - arg.tx_rate = 0; - arg.tx_power = 0; - arg.bcn = bcn->data; - arg.bcn_len = bcn->len; + spin_lock_bh(&ar->data_lock); - ret = ath10k_wmi_beacon_send(ar, &arg); - if (ret) - ath10k_warn("could not send beacon (%d)\n", ret); + if (arvif->beacon) { + switch (arvif->beacon_state) { + case ATH10K_BEACON_SENT: + break; + case ATH10K_BEACON_SCHEDULED: + ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n", + arvif->vdev_id); + break; + case ATH10K_BEACON_SENDING: + ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n", + arvif->vdev_id); + dev_kfree_skb(bcn); + goto skip; + } + + ath10k_mac_vif_beacon_free(arvif); + } - dev_kfree_skb_any(bcn); + if (!arvif->beacon_buf) { + paddr = dma_map_single(arvif->ar->dev, bcn->data, + bcn->len, DMA_TO_DEVICE); + ret = dma_mapping_error(arvif->ar->dev, paddr); + if (ret) { + ath10k_warn(ar, "failed to map beacon: %d\n", + ret); + dev_kfree_skb_any(bcn); + goto skip; + } + + ATH10K_SKB_CB(bcn)->paddr = paddr; + } else { + if (bcn->len > IEEE80211_MAX_FRAME_LEN) { + ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n", + bcn->len, IEEE80211_MAX_FRAME_LEN); + skb_trim(bcn, IEEE80211_MAX_FRAME_LEN); + } + memcpy(arvif->beacon_buf, bcn->data, bcn->len); + ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr; + } + + arvif->beacon = bcn; + arvif->beacon_state = ATH10K_BEACON_SCHEDULED; + + trace_ath10k_tx_hdr(ar, bcn->data, bcn->len); + trace_ath10k_tx_payload(ar, bcn->data, bcn->len); + +skip: + spin_unlock_bh(&ar->data_lock); } + + ath10k_wmi_tx_beacons_nowait(ar); } -static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); } -static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) +static void ath10k_radar_detected(struct ath10k *ar) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n"); + ATH10K_DFS_STAT_INC(ar, radar_detected); + + /* Control radar events reporting in debugfs file + * dfs_block_radar_events + */ + if (ar->dfs_block_radar_events) + ath10k_info(ar, "DFS Radar 
detected, but ignored as requested\n");
+ else
+ ieee80211_radar_detected(ar->hw, NULL);
 }

-static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_radar_confirmation_work(struct work_struct *work)
 {
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+ struct ath10k *ar = container_of(work, struct ath10k,
+ radar_confirmation_work);
+ struct ath10k_radar_found_info radar_info;
+ int ret, time_left;
+
+ reinit_completion(&ar->wmi.radar_confirm);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info));
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_report_radar_found(ar, &radar_info);
+ if (ret) {
+ ath10k_warn(ar, "failed to send radar found %d\n", ret);
+ goto wait_complete;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm,
+ ATH10K_WMI_DFS_CONF_TIMEOUT_HZ);
+ if (time_left) {
+ /* DFS Confirmation status event received and
+ * necessary action completed.
+ */
+ goto wait_complete;
+ } else {
+ /* DFS Confirmation event not received from FW. Considering this
+ * as real radar.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs confirmation not received from fw, considering as radar\n");
+ goto radar_detected;
+ }
+
+radar_detected:
+ ath10k_radar_detected(ar);
+
+ /* Reset state to allow sending confirmation on consecutive radar
+ * detections, unless radar confirmation is disabled/stopped.
+ */
+wait_complete:
+ spin_lock_bh(&ar->data_lock);
+ if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED)
+ ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_dfs_radar_report(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_radar_report *rr,
+ u64 tsf)
+{
+ u32 reg0, reg1, tsf32l;
+ struct ieee80211_channel *ch;
+ struct pulse_event pe;
+ struct radar_detector_specs rs;
+ u64 tsf64;
+ u8 rssi, width;
+ struct ath10k_radar_found_info *radar_info;
+
+ reg0 = __le32_to_cpu(rr->reg0);
+ reg1 = __le32_to_cpu(rr->reg1);
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
+ MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_delta_peak %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
+ MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
+
+ if (!ar->dfs_detector)
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->rx_channel;
+
+ /* fetch target operating channel during channel change */
+ if (!ch)
+ ch = ar->tgt_oper_chan;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch) {
+ ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
+ goto radar_detected;
+ }
+
+ /* report event to DFS pattern detector */
+ tsf32l = phyerr->tsf_timestamp;
+ tsf64 = tsf & (~0xFFFFFFFFULL);
+ tsf64 |= tsf32l;
+
+ width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
+ rssi = phyerr->rssi_combined;
+
+ /* hardware stores
this as 8 bit signed value, + * set to zero if negative number + */ + if (rssi & 0x80) + rssi = 0; + + pe.ts = tsf64; + pe.freq = ch->center_freq; + pe.width = width; + pe.rssi = rssi; + pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0); + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n", + pe.freq, pe.width, pe.rssi, pe.ts); + + ATH10K_DFS_STAT_INC(ar, pulses_detected); + + if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) { + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "dfs no pulse pattern detected, yet\n"); + return; + } + + if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) && + ar->dfs_detector->region == NL80211_DFS_FCC) { + /* Consecutive radar indications need not be + * sent to the firmware until we get confirmation + * for the previous detected radar. + */ + spin_lock_bh(&ar->data_lock); + if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) { + spin_unlock_bh(&ar->data_lock); + return; + } + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS; + radar_info = &ar->last_radar_info; + + radar_info->pri_min = rs.pri_min; + radar_info->pri_max = rs.pri_max; + radar_info->width_min = rs.width_min; + radar_info->width_max = rs.width_max; + /*TODO Find sidx_min and sidx_max */ + radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX); + radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX); + + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n", + radar_info->pri_min, radar_info->pri_max, + radar_info->width_min, radar_info->width_max, + radar_info->sidx_min, radar_info->sidx_max); + ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work); + spin_unlock_bh(&ar->data_lock); + return; + } + +radar_detected: + ath10k_radar_detected(ar); } -static void ath10k_wmi_event_profile_match(struct ath10k *ar, - struct sk_buff *skb) +static int ath10k_dfs_fft_report(struct ath10k *ar, + struct wmi_phyerr_ev_arg *phyerr, + const struct phyerr_fft_report *fftr, + u64 tsf) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); + u32 reg0, reg1; + u8 rssi, peak_mag; + + reg0 = __le32_to_cpu(fftr->reg0); + reg1 = __le32_to_cpu(fftr->reg1); + rssi = phyerr->rssi_combined; + + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", + MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB), + MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB), + MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX), + MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX)); + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n", + MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB), + MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB), + MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG), + MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB)); + + peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG); + + /* false event detection */ + if (rssi == DFS_RSSI_POSSIBLY_FALSE && + peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) { + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); + ATH10K_DFS_STAT_INC(ar, pulses_discarded); + return -EINVAL; + } + + return 0; } -static void ath10k_wmi_event_debug_print(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_wmi_event_dfs(struct ath10k *ar, + struct wmi_phyerr_ev_arg *phyerr, + u64 tsf) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n"); + int buf_len, tlv_len, res, i = 
0; + const struct phyerr_tlv *tlv; + const struct phyerr_radar_report *rr; + const struct phyerr_fft_report *fftr; + const u8 *tlv_buf; + + buf_len = phyerr->buf_len; + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", + phyerr->phy_err_code, phyerr->rssi_combined, + phyerr->tsf_timestamp, tsf, buf_len); + + /* Skip event if DFS disabled */ + if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) + return; + + ATH10K_DFS_STAT_INC(ar, pulses_total); + + while (i < buf_len) { + if (i + sizeof(*tlv) > buf_len) { + ath10k_warn(ar, "too short buf for tlv header (%d)\n", + i); + return; + } + + tlv = (struct phyerr_tlv *)&phyerr->buf[i]; + tlv_len = __le16_to_cpu(tlv->len); + tlv_buf = &phyerr->buf[i + sizeof(*tlv)]; + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", + tlv_len, tlv->tag, tlv->sig); + + switch (tlv->tag) { + case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY: + if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) { + ath10k_warn(ar, "too short radar pulse summary (%d)\n", + i); + return; + } + + rr = (struct phyerr_radar_report *)tlv_buf; + ath10k_dfs_radar_report(ar, phyerr, rr, tsf); + break; + case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: + if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { + ath10k_warn(ar, "too short fft report (%d)\n", + i); + return; + } + + fftr = (struct phyerr_fft_report *)tlv_buf; + res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf); + if (res) + return; + break; + } + + i += sizeof(*tlv) + tlv_len; + } } -static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) +void ath10k_wmi_event_spectral_scan(struct ath10k *ar, + struct wmi_phyerr_ev_arg *phyerr, + u64 tsf) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); + int buf_len, tlv_len, res, i = 0; + struct phyerr_tlv *tlv; + const void *tlv_buf; + const struct phyerr_fft_report *fftr; + size_t fftr_len; + + buf_len = phyerr->buf_len; + + while (i < buf_len) { + if (i + sizeof(*tlv) > buf_len) { + ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n", + i); + return; + } + + tlv = (struct phyerr_tlv *)&phyerr->buf[i]; + tlv_len = __le16_to_cpu(tlv->len); + tlv_buf = &phyerr->buf[i + sizeof(*tlv)]; + + if (i + sizeof(*tlv) + tlv_len > buf_len) { + ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n", + i); + return; + } + + switch (tlv->tag) { + case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: + if (sizeof(*fftr) > tlv_len) { + ath10k_warn(ar, "failed to parse fft report at byte %d\n", + i); + return; + } + + fftr_len = tlv_len - sizeof(*fftr); + fftr = tlv_buf; + res = ath10k_spectral_process_fft(ar, phyerr, + fftr, fftr_len, + tsf); + if (res < 0) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n", + res); + return; + } + break; + } + + i += sizeof(*tlv) + tlv_len; + } } -static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, - struct sk_buff *skb) +static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_phyerr_hdr_arg *arg) +{ + struct wmi_phyerr_event *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs); + arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32); + arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32); + arg->buf_len = skb->len - sizeof(*ev); + arg->phyerrs = ev->phyerrs; + + return 0; +} + +static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_phyerr_hdr_arg *arg) +{ + struct 
wmi_10_4_phyerr_event *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + /* 10.4 firmware always reports only one phyerr */ + arg->num_phyerrs = 1; + + arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32); + arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32); + arg->buf_len = skb->len; + arg->phyerrs = skb->data; + + return 0; +} + +int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, + const void *phyerr_buf, + int left_len, + struct wmi_phyerr_ev_arg *arg) +{ + const struct wmi_phyerr *phyerr = phyerr_buf; + int i; + + if (left_len < sizeof(*phyerr)) { + ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n", + left_len, sizeof(*phyerr)); + return -EINVAL; + } + + arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp); + arg->freq1 = __le16_to_cpu(phyerr->freq1); + arg->freq2 = __le16_to_cpu(phyerr->freq2); + arg->rssi_combined = phyerr->rssi_combined; + arg->chan_width_mhz = phyerr->chan_width_mhz; + arg->buf_len = __le32_to_cpu(phyerr->buf_len); + arg->buf = phyerr->buf; + arg->hdr_len = sizeof(*phyerr); + + for (i = 0; i < 4; i++) + arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]); + + switch (phyerr->phy_err_code) { + case PHY_ERROR_GEN_SPECTRAL_SCAN: + arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN; + break; + case PHY_ERROR_GEN_FALSE_RADAR_EXT: + arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT; + break; + case PHY_ERROR_GEN_RADAR: + arg->phy_err_code = PHY_ERROR_RADAR; + break; + default: + arg->phy_err_code = PHY_ERROR_UNKNOWN; + break; + } + + return 0; +} + +static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar, + const void *phyerr_buf, + int left_len, + struct wmi_phyerr_ev_arg *arg) +{ + const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf; + u32 phy_err_mask; + int i; + + if (left_len < sizeof(*phyerr)) { + ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n", + left_len, sizeof(*phyerr)); + return -EINVAL; + } + + arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp); + arg->freq1 = __le16_to_cpu(phyerr->freq1); + arg->freq2 = __le16_to_cpu(phyerr->freq2); + arg->rssi_combined = phyerr->rssi_combined; + arg->chan_width_mhz = phyerr->chan_width_mhz; + arg->buf_len = __le32_to_cpu(phyerr->buf_len); + arg->buf = phyerr->buf; + arg->hdr_len = sizeof(*phyerr); + + for (i = 0; i < 4; i++) + arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]); + + phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]); + + if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK) + arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN; + else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK) + arg->phy_err_code = PHY_ERROR_RADAR; + else + arg->phy_err_code = PHY_ERROR_UNKNOWN; + + return 0; +} + +void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_phyerr_hdr_arg hdr_arg = {}; + struct wmi_phyerr_ev_arg phyerr_arg = {}; + const void *phyerr; + u32 count, i, buf_len, phy_err_code; + u64 tsf; + int left_len, ret; + + ATH10K_DFS_STAT_INC(ar, phy_errors); + + ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg); + if (ret) { + ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret); + return; + } + + /* Check number of included events */ + count = hdr_arg.num_phyerrs; + + left_len = hdr_arg.buf_len; + + tsf = hdr_arg.tsf_u32; + tsf <<= 32; + tsf |= hdr_arg.tsf_l32; + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event phyerr count %d tsf64 0x%llX\n", + count, tsf); + + phyerr = hdr_arg.phyerrs; + for (i = 0; i < count; i++) { + ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg); + if (ret) { + ath10k_warn(ar, "failed to 
parse phyerr event (%d)\n", + i); + return; + } + + left_len -= phyerr_arg.hdr_len; + buf_len = phyerr_arg.buf_len; + phy_err_code = phyerr_arg.phy_err_code; + + if (left_len < buf_len) { + ath10k_warn(ar, "single event (%d) wrong buf len\n", i); + return; + } + + left_len -= buf_len; + + switch (phy_err_code) { + case PHY_ERROR_RADAR: + ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf); + break; + case PHY_ERROR_SPECTRAL_SCAN: + ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf); + break; + case PHY_ERROR_FALSE_RADAR_EXT: + ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf); + ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf); + break; + default: + break; + } + + phyerr = phyerr + phyerr_arg.hdr_len + buf_len; + } +} + +static int +ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_dfs_status_ev_arg *arg) +{ + struct wmi_dfs_status_ev_arg *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + arg->status = ev->status; + + return 0; +} + +static void +ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_dfs_status_ev_arg status_arg = {}; + int ret; + + ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg); + + if (ret) { + ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "dfs status event received from fw: %d\n", + status_arg.status); + + /* Even in case of radar detection failure we follow the same + * behaviour as if radar is detected i.e to switch to a different + * channel. + */ + if (status_arg.status == WMI_HW_RADAR_DETECTED || + status_arg.status == WMI_RADAR_DETECTION_FAIL) + ath10k_radar_detected(ar); + complete(&ar->wmi.radar_confirm); +} + +void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_roam_ev_arg arg = {}; + int ret; + u32 vdev_id; + u32 reason; + s32 rssi; + + ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse roam event: %d\n", ret); + return; + } + + vdev_id = __le32_to_cpu(arg.vdev_id); + reason = __le32_to_cpu(arg.reason); + rssi = __le32_to_cpu(arg.rssi); + rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT; + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi roam event vdev %u reason 0x%08x rssi %d\n", + vdev_id, reason, rssi); + + if (reason >= WMI_ROAM_REASON_MAX) + ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n", + reason, vdev_id); + + switch (reason) { + case WMI_ROAM_REASON_BEACON_MISS: + ath10k_mac_handle_beacon_miss(ar, vdev_id); + break; + case WMI_ROAM_REASON_BETTER_AP: + case WMI_ROAM_REASON_LOW_RSSI: + case WMI_ROAM_REASON_SUITABLE_AP_FOUND: + case WMI_ROAM_REASON_HO_FAILED: + ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n", + reason, vdev_id); + break; + } +} + +void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); } -static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, +void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb) +{ + char buf[101], c; + int i; + + for (i = 0; i < sizeof(buf) - 1; i++) { + if (i >= skb->len) + break; + + c = skb->data[i]; + + if (c == '\0') + break; + + if (isascii(c) && isprint(c)) + buf[i] = c; + else + buf[i] = '.'; + } + + if (i == sizeof(buf) - 1) + ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len); + + /* for some reason the debug prints end with \n, remove 
that */ + if (skb->data[i - 1] == '\n') + i--; + + /* the last byte is always reserved for the null character */ + buf[i] = '\0'; + + ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf); +} + +void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); +} + +void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); +} + +void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); } -static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, +void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); } -static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); } -static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); + struct wmi_wow_ev_arg ev = {}; + int ret; + + complete(&ar->wow.wakeup_completed); + + ret = ath10k_wmi_pull_wow_event(ar, skb, &ev); + if (ret) { + ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n", + wow_reason(ev.wake_reason)); } -static void ath10k_wmi_event_dcs_interference(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); } -static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, - struct sk_buff *skb) +static u8 ath10k_tpc_config_get_rate(struct ath10k *ar, + struct wmi_pdev_tpc_config_event *ev, + u32 rate_idx, u32 num_chains, + u32 rate_code, u8 type) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); + u8 tpc, num_streams, preamble, ch, stm_idx; + + num_streams = ATH10K_HW_NSS(rate_code); + preamble = ATH10K_HW_PREAMBLE(rate_code); + ch = num_chains - 1; + + tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]); + + if (__le32_to_cpu(ev->num_tx_chain) <= 1) + goto out; + + if (preamble == WMI_RATE_PREAMBLE_CCK) + goto out; + + stm_idx = num_streams - 1; + if (num_chains <= num_streams) + goto out; + + switch (type) { + case WMI_TPC_TABLE_TYPE_STBC: + tpc = min_t(u8, tpc, + ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]); + break; + case WMI_TPC_TABLE_TYPE_TXBF: + tpc = min_t(u8, tpc, + ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]); + break; + case WMI_TPC_TABLE_TYPE_CDD: + tpc = min_t(u8, tpc, + ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]); + break; + default: + ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type); + tpc = 0; + break; + } + +out: + return tpc; } -static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, - struct sk_buff *skb) +static void 
ath10k_tpc_config_disp_tables(struct ath10k *ar, + struct wmi_pdev_tpc_config_event *ev, + struct ath10k_tpc_stats *tpc_stats, + u8 *rate_code, u16 *pream_table, u8 type) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); + u32 i, j, pream_idx, flags; + u8 tpc[WMI_TPC_TX_N_CHAIN]; + char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE]; + char buff[WMI_TPC_BUF_SIZE]; + + flags = __le32_to_cpu(ev->flags); + + switch (type) { + case WMI_TPC_TABLE_TYPE_CDD: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + case WMI_TPC_TABLE_TYPE_STBC: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + case WMI_TPC_TABLE_TYPE_TXBF: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + default: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "invalid table type in wmi tpc event: %d\n", type); + return; + } + + pream_idx = 0; + for (i = 0; i < tpc_stats->rate_max; i++) { + memset(tpc_value, 0, sizeof(tpc_value)); + memset(buff, 0, sizeof(buff)); + if (i == pream_table[pream_idx]) + pream_idx++; + + for (j = 0; j < tpc_stats->num_tx_chain; j++) { + tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1, + rate_code[i], + type); + snprintf(buff, sizeof(buff), "%8d ", tpc[j]); + strlcat(tpc_value, buff, sizeof(tpc_value)); + } + tpc_stats->tpc_table[type].pream_idx[i] = pream_idx; + tpc_stats->tpc_table[type].rate_code[i] = rate_code[i]; + memcpy(tpc_stats->tpc_table[type].tpc_value[i], + tpc_value, sizeof(tpc_value)); + } } -static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table, + u32 num_tx_chain) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); + u32 i, j, pream_idx; + u8 rate_idx; + + /* Create the rate code table based on the chains supported */ + rate_idx = 0; + pream_idx = 0; + + /* Fill CCK rate code */ + for (i = 0; i < 4; i++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK); + rate_idx++; + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill OFDM rate code */ + for (i = 0; i < 8; i++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM); + rate_idx++; + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill HT20 rate code */ + for (i = 0; i < num_tx_chain; i++) { + for (j = 0; j < 8; j++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT); + rate_idx++; + } + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill HT40 rate code */ + for (i = 0; i < num_tx_chain; i++) { + for (j = 0; j < 8; j++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT); + rate_idx++; + } + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill VHT20 rate code */ + for (i = 0; i < num_tx_chain; i++) { + for (j = 0; j < 10; j++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT); + rate_idx++; + } + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill VHT40 rate code */ + for (i = 0; i < num_tx_chain; i++) { + for (j = 0; j < 10; j++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(j, i, 
WMI_RATE_PREAMBLE_VHT); + rate_idx++; + } + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill VHT80 rate code */ + for (i = 0; i < num_tx_chain; i++) { + for (j = 0; j < 10; j++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT); + rate_idx++; + } + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + rate_code[rate_idx++] = + ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK); + rate_code[rate_idx++] = + ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM); + rate_code[rate_idx++] = + ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK); + rate_code[rate_idx++] = + ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM); + rate_code[rate_idx++] = + ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM); + + pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END; } -static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); + u32 num_tx_chain, rate_max; + u8 rate_code[WMI_TPC_RATE_MAX]; + u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; + struct wmi_pdev_tpc_config_event *ev; + struct ath10k_tpc_stats *tpc_stats; + + ev = (struct wmi_pdev_tpc_config_event *)skb->data; + + num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + + if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { + ath10k_warn(ar, "number of tx chain is %d greater than TPC configured tx chain %d\n", + num_tx_chain, WMI_TPC_TX_N_CHAIN); + return; + } + + rate_max = __le32_to_cpu(ev->rate_max); + if (rate_max > WMI_TPC_RATE_MAX) { + ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n", + rate_max, WMI_TPC_RATE_MAX); + rate_max = WMI_TPC_RATE_MAX; + } + + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); + if (!tpc_stats) + return; + + ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, + num_tx_chain); + + tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq); + tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode); + tpc_stats->ctl = __le32_to_cpu(ev->ctl); + tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain); + tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain); + tpc_stats->twice_antenna_reduction = + __le32_to_cpu(ev->twice_antenna_reduction); + tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); + tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); + tpc_stats->num_tx_chain = num_tx_chain; + tpc_stats->rate_max = rate_max; + + ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_CDD); + ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_STBC); + ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_TXBF); + + ath10k_debug_tpc_stats_process(ar, tpc_stats); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n", + __le32_to_cpu(ev->chan_freq), + __le32_to_cpu(ev->phy_mode), + __le32_to_cpu(ev->ctl), + __le32_to_cpu(ev->reg_domain), + a_sle32_to_cpu(ev->twice_antenna_gain), + __le32_to_cpu(ev->twice_antenna_reduction), + __le32_to_cpu(ev->power_limit), + __le32_to_cpu(ev->twice_max_rd_power) / 2, + __le32_to_cpu(ev->num_tx_chain), + __le32_to_cpu(ev->rate_max)); } -static void ath10k_wmi_event_delba_complete(struct ath10k *ar, - struct sk_buff *skb) +static u8 +ath10k_wmi_tpc_final_get_rate(struct ath10k *ar, + struct wmi_pdev_tpc_final_table_event *ev, + u32 
rate_idx, u32 num_chains, + u32 rate_code, u8 type, u32 pream_idx) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); + u8 tpc, num_streams, preamble, ch, stm_idx; + s8 pow_agcdd, pow_agstbc, pow_agtxbf; + int pream; + + num_streams = ATH10K_HW_NSS(rate_code); + preamble = ATH10K_HW_PREAMBLE(rate_code); + ch = num_chains - 1; + stm_idx = num_streams - 1; + pream = -1; + + if (__le32_to_cpu(ev->chan_freq) <= 2483) { + switch (pream_idx) { + case WMI_TPC_PREAM_2GHZ_CCK: + pream = 0; + break; + case WMI_TPC_PREAM_2GHZ_OFDM: + pream = 1; + break; + case WMI_TPC_PREAM_2GHZ_HT20: + case WMI_TPC_PREAM_2GHZ_VHT20: + pream = 2; + break; + case WMI_TPC_PREAM_2GHZ_HT40: + case WMI_TPC_PREAM_2GHZ_VHT40: + pream = 3; + break; + case WMI_TPC_PREAM_2GHZ_VHT80: + pream = 4; + break; + default: + pream = -1; + break; + } + } + + if (__le32_to_cpu(ev->chan_freq) >= 5180) { + switch (pream_idx) { + case WMI_TPC_PREAM_5GHZ_OFDM: + pream = 0; + break; + case WMI_TPC_PREAM_5GHZ_HT20: + case WMI_TPC_PREAM_5GHZ_VHT20: + pream = 1; + break; + case WMI_TPC_PREAM_5GHZ_HT40: + case WMI_TPC_PREAM_5GHZ_VHT40: + pream = 2; + break; + case WMI_TPC_PREAM_5GHZ_VHT80: + pream = 3; + break; + case WMI_TPC_PREAM_5GHZ_HTCUP: + pream = 4; + break; + default: + pream = -1; + break; + } + } + + if (pream == -1) { + ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n", + pream_idx, __le32_to_cpu(ev->chan_freq)); + tpc = 0; + goto out; + } + + if (pream == 4) + tpc = min_t(u8, ev->rates_array[rate_idx], + ev->max_reg_allow_pow[ch]); + else + tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx], + ev->max_reg_allow_pow[ch]), + ev->ctl_power_table[0][pream][stm_idx]); + + if (__le32_to_cpu(ev->num_tx_chain) <= 1) + goto out; + + if (preamble == WMI_RATE_PREAMBLE_CCK) + goto out; + + if (num_chains <= num_streams) + goto out; + + switch (type) { + case WMI_TPC_TABLE_TYPE_STBC: + pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]; + if (pream == 4) + tpc = min_t(u8, tpc, pow_agstbc); + else + tpc = min_t(u8, min_t(u8, tpc, pow_agstbc), + ev->ctl_power_table[0][pream][stm_idx]); + break; + case WMI_TPC_TABLE_TYPE_TXBF: + pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]; + if (pream == 4) + tpc = min_t(u8, tpc, pow_agtxbf); + else + tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf), + ev->ctl_power_table[1][pream][stm_idx]); + break; + case WMI_TPC_TABLE_TYPE_CDD: + pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]; + if (pream == 4) + tpc = min_t(u8, tpc, pow_agcdd); + else + tpc = min_t(u8, min_t(u8, tpc, pow_agcdd), + ev->ctl_power_table[0][pream][stm_idx]); + break; + default: + ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type); + tpc = 0; + break; + } + +out: + return tpc; } -static void ath10k_wmi_event_addba_complete(struct ath10k *ar, - struct sk_buff *skb) +static void +ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, + struct wmi_pdev_tpc_final_table_event *ev, + struct ath10k_tpc_stats_final *tpc_stats, + u8 *rate_code, u16 *pream_table, u8 type) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); + u32 i, j, pream_idx, flags; + u8 tpc[WMI_TPC_TX_N_CHAIN]; + char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE]; + char buff[WMI_TPC_BUF_SIZE]; + + flags = __le32_to_cpu(ev->flags); + + switch (type) { + case WMI_TPC_TABLE_TYPE_CDD: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + case 
WMI_TPC_TABLE_TYPE_STBC: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + case WMI_TPC_TABLE_TYPE_TXBF: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + default: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "invalid table type in wmi tpc event: %d\n", type); + return; + } + + pream_idx = 0; + for (i = 0; i < tpc_stats->rate_max; i++) { + memset(tpc_value, 0, sizeof(tpc_value)); + memset(buff, 0, sizeof(buff)); + if (i == pream_table[pream_idx]) + pream_idx++; + + for (j = 0; j < tpc_stats->num_tx_chain; j++) { + tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1, + rate_code[i], + type, pream_idx); + snprintf(buff, sizeof(buff), "%8d ", tpc[j]); + strlcat(tpc_value, buff, sizeof(tpc_value)); + } + tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx; + tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i]; + memcpy(tpc_stats->tpc_table_final[type].tpc_value[i], + tpc_value, sizeof(tpc_value)); + } } -static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); + u32 num_tx_chain, rate_max; + u8 rate_code[WMI_TPC_FINAL_RATE_MAX]; + u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; + struct wmi_pdev_tpc_final_table_event *ev; + struct ath10k_tpc_stats_final *tpc_stats; + + ev = (struct wmi_pdev_tpc_final_table_event *)skb->data; + + num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { + ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n", + num_tx_chain, WMI_TPC_TX_N_CHAIN); + return; + } + + rate_max = __le32_to_cpu(ev->rate_max); + if (rate_max > WMI_TPC_FINAL_RATE_MAX) { + ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n", + rate_max, WMI_TPC_FINAL_RATE_MAX); + rate_max = WMI_TPC_FINAL_RATE_MAX; + } + + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); + if (!tpc_stats) + return; + + ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, + num_tx_chain); + + tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq); + tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode); + tpc_stats->ctl = __le32_to_cpu(ev->ctl); + tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain); + tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain); + tpc_stats->twice_antenna_reduction = + __le32_to_cpu(ev->twice_antenna_reduction); + tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); + tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); + tpc_stats->num_tx_chain = num_tx_chain; + tpc_stats->rate_max = rate_max; + + ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_CDD); + ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_STBC); + ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_TXBF); + + ath10k_debug_tpc_stats_final_process(ar, tpc_stats); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n", + __le32_to_cpu(ev->chan_freq), + 
__le32_to_cpu(ev->phy_mode), + __le32_to_cpu(ev->ctl), + __le32_to_cpu(ev->reg_domain), + a_sle32_to_cpu(ev->twice_antenna_gain), + __le32_to_cpu(ev->twice_antenna_reduction), + __le32_to_cpu(ev->power_limit), + __le32_to_cpu(ev->twice_max_rd_power) / 2, + __le32_to_cpu(ev->num_tx_chain), + __le32_to_cpu(ev->rate_max)); } -static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, - struct sk_buff *skb) +static void +ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb) { - struct wmi_service_ready_event *ev = (void *)skb->data; + struct wmi_tdls_peer_event *ev; + struct ath10k_peer *peer; + struct ath10k_vif *arvif; + int vdev_id; + int peer_status; + int peer_reason; + u8 reason; if (skb->len < sizeof(*ev)) { - ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", - skb->len, sizeof(*ev)); + ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n", + skb->len); return; } - ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); - ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); - ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); - ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); + ev = (struct wmi_tdls_peer_event *)skb->data; + vdev_id = __le32_to_cpu(ev->vdev_id); + peer_status = __le32_to_cpu(ev->peer_status); + peer_reason = __le32_to_cpu(ev->peer_reason); + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr); + spin_unlock_bh(&ar->data_lock); + + if (!peer) { + ath10k_warn(ar, "failed to find peer entry for %pM\n", + ev->peer_macaddr.addr); + return; + } + + switch (peer_status) { + case WMI_TDLS_SHOULD_TEARDOWN: + switch (peer_reason) { + case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT: + case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE: + case WMI_TDLS_TEARDOWN_REASON_RSSI: + reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE; + break; + default: + reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; + break; + } + + arvif = ath10k_get_arvif(ar, vdev_id); + if (!arvif) { + ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n", + vdev_id); + return; + } + + ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr, + NL80211_TDLS_TEARDOWN, reason, + GFP_ATOMIC); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "received tdls teardown event for peer %pM reason %u\n", + ev->peer_macaddr.addr, peer_reason); + break; + default: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "received unknown tdls peer event %u\n", + peer_status); + break; + } +} + +static void +ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_peer_sta_ps_state_chg_event *ev; + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + u8 peer_addr[ETH_ALEN]; + + lockdep_assert_held(&ar->data_lock); + + ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data; + ether_addr_copy(peer_addr, ev->peer_macaddr.addr); + + rcu_read_lock(); + + sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL); + + if (!sta) { + ath10k_warn(ar, "failed to find station entry %pM\n", + peer_addr); + goto exit; + } + + arsta = (struct ath10k_sta *)sta->drv_priv; + arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state); + +exit: + rcu_read_unlock(); +} + +void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); +} + +void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); +} + +void 
ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); +} + +void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); +} + +void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); +} + +void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); +} + +void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); +} + +void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); +} + +void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); +} + +static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id, + u32 num_units, u32 unit_len) +{ + dma_addr_t paddr; + u32 pool_size; + int idx = ar->wmi.num_mem_chunks; + void *vaddr; + + pool_size = num_units * round_up(unit_len, 4); + vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); + + if (!vaddr) + return -ENOMEM; + + ar->wmi.mem_chunks[idx].vaddr = vaddr; + ar->wmi.mem_chunks[idx].paddr = paddr; + ar->wmi.mem_chunks[idx].len = pool_size; + ar->wmi.mem_chunks[idx].req_id = req_id; + ar->wmi.num_mem_chunks++; + + return num_units; +} + +static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, + u32 num_units, u32 unit_len) +{ + int ret; + + while (num_units) { + ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len); + if (ret < 0) + return ret; + + num_units -= ret; + } + + return 0; +} + +static bool +ath10k_wmi_is_host_mem_allocated(struct ath10k *ar, + const struct wlan_host_mem_req **mem_reqs, + u32 num_mem_reqs) +{ + u32 req_id, num_units, unit_size, num_unit_info; + u32 pool_size; + int i, j; + bool found; + + if (ar->wmi.num_mem_chunks != num_mem_reqs) + return false; + + for (i = 0; i < num_mem_reqs; ++i) { + req_id = __le32_to_cpu(mem_reqs[i]->req_id); + num_units = __le32_to_cpu(mem_reqs[i]->num_units); + unit_size = __le32_to_cpu(mem_reqs[i]->unit_size); + num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info); + + if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) { + if (ar->num_active_peers) + num_units = ar->num_active_peers + 1; + else + num_units = ar->max_num_peers + 1; + } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) { + num_units = ar->max_num_peers + 1; + } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) { + num_units = ar->max_num_vdevs + 1; + } + + found = false; + for (j = 0; j < ar->wmi.num_mem_chunks; j++) { + if (ar->wmi.mem_chunks[j].req_id == req_id) { + pool_size = num_units * round_up(unit_size, 4); + if (ar->wmi.mem_chunks[j].len == pool_size) { + found = true; + break; + } + } + } + if (!found) + return false; + } + + return true; +} + +static int +ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_svc_rdy_ev_arg *arg) +{ + struct wmi_service_ready_event *ev; + size_t i, n; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + ev = (void *)skb->data; + skb_pull(skb, sizeof(*ev)); + arg->min_tx_power = ev->hw_min_tx_power; + arg->max_tx_power = ev->hw_max_tx_power; + arg->ht_cap = 
ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
+ arg->sw_ver0 = ev->sw_version;
+ arg->sw_ver1 = ev->sw_version_1;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
+ arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
+ arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
+ arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
+
+static int
+ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_10x_service_ready_event *ev;
+ int i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
+ arg->sw_ver0 = ev->sw_version;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
+ arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
+ arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
+ arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ /* Deliberately skipping ev->sys_cap_info as WMI and WMI-TLV have
+ * different values. We would need a translation to handle that,
+ * but as we don't currently need anything from sys_cap_info from
+ * the WMI interface (only from WMI-TLV) it is safest to skip it.
+ */ + + n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs), + ARRAY_SIZE(arg->mem_reqs)); + for (i = 0; i < n; i++) + arg->mem_reqs[i] = &ev->mem_reqs[i]; + + if (skb->len < + __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0])) + return -EPROTO; + + return 0; +} + +static void ath10k_wmi_event_service_ready_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work); + struct sk_buff *skb = ar->svc_rdy_skb; + struct wmi_svc_rdy_ev_arg arg = {}; + u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; + int ret; + bool allocated; + + if (!skb) { + ath10k_warn(ar, "invalid service ready event skb\n"); + return; + } + + ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse service ready: %d\n", ret); + return; + } + + ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map, + arg.service_map_len); + + ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power); + ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power); + ar->ht_cap_info = __le32_to_cpu(arg.ht_cap); + ar->vht_cap_info = __le32_to_cpu(arg.vht_cap); + ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs); ar->fw_version_major = - (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; - ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); + (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24; + ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff); ar->fw_version_release = - (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16; - ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff); - ar->phy_capability = __le32_to_cpu(ev->phy_capability); - - ar->ath_common.regulatory.current_rd = - __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); + (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16; + ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff); + ar->phy_capability = __le32_to_cpu(arg.phy_capab); + ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains); + ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd); + ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan); + ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan); + ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan); + ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan); + ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info); + + ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", + arg.service_map, arg.service_map_len); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n", + ar->sys_cap_info); + + if (ar->num_rf_chains > ar->max_spatial_stream) { + ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", + ar->num_rf_chains, ar->max_spatial_stream); + ar->num_rf_chains = ar->max_spatial_stream; + } - ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, - sizeof(ev->wmi_service_bitmap)); + if (!ar->cfg_tx_chainmask) { + ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1; + ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1; + } if (strlen(ar->hw->wiphy->fw_version) == 0) { snprintf(ar->hw->wiphy->fw_version, @@ -878,62 +5601,291 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, ar->fw_version_build); } - /* FIXME: it probably should be better to support this */ - if (__le32_to_cpu(ev->num_mem_reqs) > 0) { - ath10k_warn("target requested %d memory chunks; ignoring\n", - __le32_to_cpu(ev->num_mem_reqs)); + num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs); + if (num_mem_reqs > WMI_MAX_MEM_REQS) { + ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n", + 
num_mem_reqs); + return; + } + + if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) { + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC + + ar->max_num_vdevs; + else + ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS + + ar->max_num_vdevs; + + ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX + + ar->max_num_vdevs; + ar->num_tids = ar->num_active_peers * 2; + ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX; + } + + /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE + * and WMI_SERVICE_IRAM_TIDS, etc. + */ + + allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs, + num_mem_reqs); + if (allocated) + goto skip_mem_alloc; + + /* Either this event is received during boot time or there is a change + * in memory requirement from firmware when compared to last request. + * Free any old memory and do a fresh allocation based on the current + * memory requirement. + */ + ath10k_wmi_free_host_mem(ar); + + for (i = 0; i < num_mem_reqs; ++i) { + req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id); + num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units); + unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size); + num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info); + + if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) { + if (ar->num_active_peers) + num_units = ar->num_active_peers + 1; + else + num_units = ar->max_num_peers + 1; + } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) { + /* number of units to allocate is number of + * peers, 1 extra for self peer on target + * this needs to be tied, host and target + * can get out of sync + */ + num_units = ar->max_num_peers + 1; + } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) { + num_units = ar->max_num_vdevs + 1; + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", + req_id, + __le32_to_cpu(arg.mem_reqs[i]->num_units), + num_unit_info, + unit_size, + num_units); + + ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units, + unit_size); + if (ret) + return; } - ath10k_dbg(ATH10K_DBG_WMI, - "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n", - __le32_to_cpu(ev->sw_version), - __le32_to_cpu(ev->sw_version_1), - __le32_to_cpu(ev->abi_version), - __le32_to_cpu(ev->phy_capability), - __le32_to_cpu(ev->ht_cap_info), - __le32_to_cpu(ev->vht_cap_info), - __le32_to_cpu(ev->vht_supp_mcs), - __le32_to_cpu(ev->sys_cap_info), - __le32_to_cpu(ev->num_mem_reqs)); +skip_mem_alloc: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n", + __le32_to_cpu(arg.min_tx_power), + __le32_to_cpu(arg.max_tx_power), + __le32_to_cpu(arg.ht_cap), + __le32_to_cpu(arg.vht_cap), + __le32_to_cpu(arg.vht_supp_mcs), + __le32_to_cpu(arg.sw_ver0), + __le32_to_cpu(arg.sw_ver1), + __le32_to_cpu(arg.fw_build), + __le32_to_cpu(arg.phy_capab), + __le32_to_cpu(arg.num_rf_chains), + __le32_to_cpu(arg.eeprom_rd), + __le32_to_cpu(arg.low_2ghz_chan), + __le32_to_cpu(arg.high_2ghz_chan), + __le32_to_cpu(arg.low_5ghz_chan), + __le32_to_cpu(arg.high_5ghz_chan), + 
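/* Illustrative sketch, not from this patch: how the service-ready worker
 * above turns one firmware memory request into an actual unit count.  The
 * NUM_UNITS_* flag values here are placeholders; the +1 accounts for the
 * target-side self peer, as the hunk's own comment notes. */
#include <stdint.h>

#define NUM_UNITS_IS_NUM_VDEVS        (1u << 0)  /* illustrative bit values */
#define NUM_UNITS_IS_NUM_PEERS        (1u << 1)
#define NUM_UNITS_IS_NUM_ACTIVE_PEERS (1u << 2)

static uint32_t effective_num_units(uint32_t fw_num_units,
				    uint32_t num_unit_info,
				    uint32_t max_num_peers,
				    uint32_t num_active_peers,
				    uint32_t max_num_vdevs)
{
	if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS)
		return (num_active_peers ? num_active_peers
					 : max_num_peers) + 1;
	if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
		return max_num_peers + 1;	/* +1 self peer on target */
	if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
		return max_num_vdevs + 1;
	return fw_num_units;			/* absolute count from firmware */
}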
__le32_to_cpu(arg.num_mem_reqs)); + dev_kfree_skb(skb); + ar->svc_rdy_skb = NULL; complete(&ar->wmi.service_ready); } -static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) +void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) { - struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; + ar->svc_rdy_skb = skb; + queue_work(ar->workqueue_aux, &ar->svc_rdy_work); +} - if (WARN_ON(skb->len < sizeof(*ev))) - return -EINVAL; +static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_rdy_ev_arg *arg) +{ + struct wmi_ready_event *ev = (void *)skb->data; - memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN); + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->sw_version = ev->sw_version; + arg->abi_version = ev->abi_version; + arg->status = ev->status; + arg->mac_addr = ev->mac_addr.addr; + + return 0; +} - ath10k_dbg(ATH10K_DBG_WMI, - "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n", - __le32_to_cpu(ev->sw_version), - __le32_to_cpu(ev->abi_version), - ev->mac_addr.addr, - __le32_to_cpu(ev->status)); +static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_roam_ev_arg *arg) +{ + struct wmi_roam_ev *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->vdev_id = ev->vdev_id; + arg->reason = ev->reason; + + return 0; +} + +static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_echo_ev_arg *arg) +{ + struct wmi_echo_event *ev = (void *)skb->data; + arg->value = ev->value; + + return 0; +} + +int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_rdy_ev_arg arg = {}; + int ret; + + ret = ath10k_wmi_pull_rdy(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse ready event: %d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n", + __le32_to_cpu(arg.sw_version), + __le32_to_cpu(arg.abi_version), + arg.mac_addr, + __le32_to_cpu(arg.status)); + + if (is_zero_ether_addr(ar->mac_addr)) + ether_addr_copy(ar->mac_addr, arg.mac_addr); complete(&ar->wmi.unified_ready); return 0; } -static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) +void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb) +{ + int ret; + struct wmi_svc_avail_ev_arg arg = {}; + + ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse service available event: %d\n", + ret); + } + + /* + * Initialization of "arg.service_map_ext_valid" to ZERO is necessary + * for the below logic to work. 
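/* Illustrative sketch, not from this patch: the pull-helper pattern the
 * hunks above follow (pull_rdy, pull_roam): validate the fixed-size header
 * before touching it, advance past it, then hand back pointers into the
 * buffer.  Plain C stand-ins replace sk_buff and the __le32 conversions. */
#include <stddef.h>
#include <stdint.h>
#include <errno.h>

struct buf { uint8_t *data; size_t len; };
struct ready_ev { uint32_t sw_version, abi_version, status; uint8_t mac[6]; };
struct rdy_arg { uint32_t sw_version, abi_version, status; const uint8_t *mac; };

static int pull_rdy(struct buf *b, struct rdy_arg *arg)
{
	const struct ready_ev *ev = (const void *)b->data;

	if (b->len < sizeof(*ev))
		return -EPROTO;		/* runt event: refuse to parse */

	b->data += sizeof(*ev);		/* the skb_pull() equivalent */
	b->len  -= sizeof(*ev);

	arg->sw_version  = ev->sw_version;
	arg->abi_version = ev->abi_version;
	arg->status      = ev->status;
	arg->mac         = ev->mac;	/* still points into the buffer */
	return 0;
}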
+ */ + if (arg.service_map_ext_valid) + ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map, + __le32_to_cpu(arg.service_map_ext_len)); +} + +static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb) +{ + const struct wmi_pdev_temperature_event *ev; + + ev = (struct wmi_pdev_temperature_event *)skb->data; + if (WARN_ON(skb->len < sizeof(*ev))) + return -EPROTO; + + ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature)); + return 0; +} + +static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar, + struct sk_buff *skb) +{ + struct wmi_pdev_bss_chan_info_event *ev; + struct survey_info *survey; + u64 busy, total, tx, rx, rx_bss; + u32 freq, noise_floor; + u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz; + int idx; + + ev = (struct wmi_pdev_bss_chan_info_event *)skb->data; + if (WARN_ON(skb->len < sizeof(*ev))) + return -EPROTO; + + freq = __le32_to_cpu(ev->freq); + noise_floor = __le32_to_cpu(ev->noise_floor); + busy = __le64_to_cpu(ev->cycle_busy); + total = __le64_to_cpu(ev->cycle_total); + tx = __le64_to_cpu(ev->cycle_tx); + rx = __le64_to_cpu(ev->cycle_rx); + rx_bss = __le64_to_cpu(ev->cycle_rx_bss); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", + freq, noise_floor, busy, total, tx, rx, rx_bss); + + spin_lock_bh(&ar->data_lock); + idx = freq_to_idx(ar, freq); + if (idx >= ARRAY_SIZE(ar->survey)) { + ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", + freq, idx); + goto exit; + } + + survey = &ar->survey[idx]; + + survey->noise = noise_floor; + survey->time = div_u64(total, cc_freq_hz); + survey->time_busy = div_u64(busy, cc_freq_hz); + survey->time_rx = div_u64(rx_bss, cc_freq_hz); + survey->time_tx = div_u64(tx, cc_freq_hz); + survey->filled |= (SURVEY_INFO_NOISE_DBM | + SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX); +exit: + spin_unlock_bh(&ar->data_lock); + complete(&ar->bss_survey_done); + return 0; +} + +static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar) +{ + if (ar->hw_params.hw_ops->set_coverage_class) { + spin_lock_bh(&ar->data_lock); + + /* This call only ensures that the modified coverage class + * persists in case the firmware sets the registers back to + * their default value. So calling it is only necessary if the + * coverage class has a non-zero value. 
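/* Illustrative sketch, not from this patch: the survey conversion done by
 * the bss-chan-info handler above.  The event reports raw MAC cycle
 * counters; dividing by the counter frequency from hw_params yields the
 * time values mac80211 expects, and rx time deliberately comes from the
 * BSS-only counter. */
#include <stdint.h>

struct survey_sketch { uint64_t time, time_busy, time_rx, time_tx; };

static void fill_survey(struct survey_sketch *s, uint32_t counter_freq,
			uint64_t total, uint64_t busy, uint64_t rx_bss,
			uint64_t tx)
{
	s->time      = total  / counter_freq;	/* div_u64() in the kernel */
	s->time_busy = busy   / counter_freq;
	s->time_rx   = rx_bss / counter_freq;	/* BSS rx, not all rx */
	s->time_tx   = tx     / counter_freq;
}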
+ */ + if (ar->fw_coverage.coverage_class) + queue_work(ar->workqueue, &ar->set_coverage_class_work); + + spin_unlock_bh(&ar->data_lock); + } +} + +static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; enum wmi_event_id id; - u16 len; cmd_hdr = (struct wmi_cmd_hdr *)skb->data; id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) - return; - - len = skb->len; + goto out; - trace_ath10k_wmi_event(id, skb->data, skb->len); + trace_ath10k_wmi_event(ar, id, skb->data, skb->len); switch (id) { case WMI_MGMT_RX_EVENTID: @@ -942,6 +5894,7 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) return; case WMI_SCAN_EVENTID: ath10k_wmi_event_scan(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); break; case WMI_CHAN_INFO_EVENTID: ath10k_wmi_event_chan_info(ar, skb); @@ -951,15 +5904,18 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) break; case WMI_DEBUG_MESG_EVENTID: ath10k_wmi_event_debug_mesg(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); break; case WMI_UPDATE_STATS_EVENTID: ath10k_wmi_event_update_stats(ar, skb); break; case WMI_VDEV_START_RESP_EVENTID: ath10k_wmi_event_vdev_start_resp(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); break; case WMI_VDEV_STOPPED_EVENTID: ath10k_wmi_event_vdev_stopped(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); break; case WMI_PEER_STA_KICKOUT_EVENTID: ath10k_wmi_event_peer_sta_kickout(ar, skb); @@ -975,12 +5931,14 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) break; case WMI_ROAM_EVENTID: ath10k_wmi_event_roam(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); break; case WMI_PROFILE_MATCH: ath10k_wmi_event_profile_match(ar, skb); break; case WMI_DEBUG_PRINT_EVENTID: ath10k_wmi_event_debug_print(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); break; case WMI_PDEV_QVIT_EVENTID: ath10k_wmi_event_pdev_qvit(ar, skb); @@ -1025,98 +5983,455 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_install_key_complete(ar, skb); break; case WMI_SERVICE_READY_EVENTID: - ath10k_wmi_service_ready_event_rx(ar, skb); - break; + ath10k_wmi_event_service_ready(ar, skb); + return; case WMI_READY_EVENTID: - ath10k_wmi_ready_event_rx(ar, skb); + ath10k_wmi_event_ready(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_SERVICE_AVAILABLE_EVENTID: + ath10k_wmi_event_service_available(ar, skb); break; default: - ath10k_warn("Unknown eventid: %d\n", id); + ath10k_warn(ar, "Unknown eventid: %d\n", id); break; } +out: dev_kfree_skb(skb); } -static void ath10k_wmi_event_work(struct work_struct *work) +static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb) { - struct ath10k *ar = container_of(work, struct ath10k, - wmi.wmi_event_work); - struct sk_buff *skb; + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_10x_event_id id; + bool consumed; - for (;;) { - skb = skb_dequeue(&ar->wmi.wmi_event_list); - if (!skb) - break; + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); - ath10k_wmi_event_process(ar, skb); + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) + goto out; + + trace_ath10k_wmi_event(ar, id, skb->data, skb->len); + + consumed = ath10k_tm_event_wmi(ar, id, skb); + + /* Ready event must be handled normally also in UTF mode so that we + * know the UTF firmware has booted, others we 
are just bypass WMI + * events to testmode. + */ + if (consumed && id != WMI_10X_READY_EVENTID) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi testmode consumed 0x%x\n", id); + goto out; + } + + switch (id) { + case WMI_10X_MGMT_RX_EVENTID: + ath10k_wmi_event_mgmt_rx(ar, skb); + /* mgmt_rx() owns the skb now! */ + return; + case WMI_10X_SCAN_EVENTID: + ath10k_wmi_event_scan(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10X_CHAN_INFO_EVENTID: + ath10k_wmi_event_chan_info(ar, skb); + break; + case WMI_10X_ECHO_EVENTID: + ath10k_wmi_event_echo(ar, skb); + break; + case WMI_10X_DEBUG_MESG_EVENTID: + ath10k_wmi_event_debug_mesg(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10X_UPDATE_STATS_EVENTID: + ath10k_wmi_event_update_stats(ar, skb); + break; + case WMI_10X_VDEV_START_RESP_EVENTID: + ath10k_wmi_event_vdev_start_resp(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10X_VDEV_STOPPED_EVENTID: + ath10k_wmi_event_vdev_stopped(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10X_PEER_STA_KICKOUT_EVENTID: + ath10k_wmi_event_peer_sta_kickout(ar, skb); + break; + case WMI_10X_HOST_SWBA_EVENTID: + ath10k_wmi_event_host_swba(ar, skb); + break; + case WMI_10X_TBTTOFFSET_UPDATE_EVENTID: + ath10k_wmi_event_tbttoffset_update(ar, skb); + break; + case WMI_10X_PHYERR_EVENTID: + ath10k_wmi_event_phyerr(ar, skb); + break; + case WMI_10X_ROAM_EVENTID: + ath10k_wmi_event_roam(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10X_PROFILE_MATCH: + ath10k_wmi_event_profile_match(ar, skb); + break; + case WMI_10X_DEBUG_PRINT_EVENTID: + ath10k_wmi_event_debug_print(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10X_PDEV_QVIT_EVENTID: + ath10k_wmi_event_pdev_qvit(ar, skb); + break; + case WMI_10X_WLAN_PROFILE_DATA_EVENTID: + ath10k_wmi_event_wlan_profile_data(ar, skb); + break; + case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_rtt_measurement_report(ar, skb); + break; + case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_tsf_measurement_report(ar, skb); + break; + case WMI_10X_RTT_ERROR_REPORT_EVENTID: + ath10k_wmi_event_rtt_error_report(ar, skb); + break; + case WMI_10X_WOW_WAKEUP_HOST_EVENTID: + ath10k_wmi_event_wow_wakeup_host(ar, skb); + break; + case WMI_10X_DCS_INTERFERENCE_EVENTID: + ath10k_wmi_event_dcs_interference(ar, skb); + break; + case WMI_10X_PDEV_TPC_CONFIG_EVENTID: + ath10k_wmi_event_pdev_tpc_config(ar, skb); + break; + case WMI_10X_INST_RSSI_STATS_EVENTID: + ath10k_wmi_event_inst_rssi_stats(ar, skb); + break; + case WMI_10X_VDEV_STANDBY_REQ_EVENTID: + ath10k_wmi_event_vdev_standby_req(ar, skb); + break; + case WMI_10X_VDEV_RESUME_REQ_EVENTID: + ath10k_wmi_event_vdev_resume_req(ar, skb); + break; + case WMI_10X_SERVICE_READY_EVENTID: + ath10k_wmi_event_service_ready(ar, skb); + return; + case WMI_10X_READY_EVENTID: + ath10k_wmi_event_ready(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10X_PDEV_UTF_EVENTID: + /* ignore utf events */ + break; + default: + ath10k_warn(ar, "Unknown eventid: %d\n", id); + break; } + +out: + dev_kfree_skb(skb); } -static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) +static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb) { - struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data; - enum wmi_event_id event_id; + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_10_2_event_id id; + bool consumed; - 
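/* Illustrative sketch, not from this patch: the MS() token-pasting macro
 * every op_rx variant above uses to pull the event id out of the first
 * header word.  The field values are illustrative stand-ins for the
 * driver's WMI_CMD_HDR_CMD_ID_{MASK,LSB}, and the __le32_to_cpu step is
 * folded into an assumed little-endian host. */
#include <stdint.h>
#include <string.h>

#define MS(val, field)	(((val) & field##_MASK) >> field##_LSB)

#define CMD_ID_MASK	0x00ffffffu	/* low bits carry the event id */
#define CMD_ID_LSB	0

static uint32_t wmi_event_id(const void *data)
{
	uint32_t hdr;

	memcpy(&hdr, data, sizeof(hdr));	/* cmd_hdr->cmd_id on the wire */
	return MS(hdr, CMD_ID);
}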
event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); - /* some events require to be handled ASAP - * thus can't be defered to a worker thread */ - switch (event_id) { - case WMI_HOST_SWBA_EVENTID: - case WMI_MGMT_RX_EVENTID: - ath10k_wmi_event_process(ar, skb); + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) + goto out; + + trace_ath10k_wmi_event(ar, id, skb->data, skb->len); + + consumed = ath10k_tm_event_wmi(ar, id, skb); + + /* Ready event must be handled normally also in UTF mode so that we + * know the UTF firmware has booted, others we are just bypass WMI + * events to testmode. + */ + if (consumed && id != WMI_10_2_READY_EVENTID) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi testmode consumed 0x%x\n", id); + goto out; + } + + switch (id) { + case WMI_10_2_MGMT_RX_EVENTID: + ath10k_wmi_event_mgmt_rx(ar, skb); + /* mgmt_rx() owns the skb now! */ + return; + case WMI_10_2_SCAN_EVENTID: + ath10k_wmi_event_scan(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_2_CHAN_INFO_EVENTID: + ath10k_wmi_event_chan_info(ar, skb); + break; + case WMI_10_2_ECHO_EVENTID: + ath10k_wmi_event_echo(ar, skb); + break; + case WMI_10_2_DEBUG_MESG_EVENTID: + ath10k_wmi_event_debug_mesg(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_2_UPDATE_STATS_EVENTID: + ath10k_wmi_event_update_stats(ar, skb); + break; + case WMI_10_2_VDEV_START_RESP_EVENTID: + ath10k_wmi_event_vdev_start_resp(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_2_VDEV_STOPPED_EVENTID: + ath10k_wmi_event_vdev_stopped(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_2_PEER_STA_KICKOUT_EVENTID: + ath10k_wmi_event_peer_sta_kickout(ar, skb); + break; + case WMI_10_2_HOST_SWBA_EVENTID: + ath10k_wmi_event_host_swba(ar, skb); + break; + case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID: + ath10k_wmi_event_tbttoffset_update(ar, skb); + break; + case WMI_10_2_PHYERR_EVENTID: + ath10k_wmi_event_phyerr(ar, skb); + break; + case WMI_10_2_ROAM_EVENTID: + ath10k_wmi_event_roam(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_2_PROFILE_MATCH: + ath10k_wmi_event_profile_match(ar, skb); + break; + case WMI_10_2_DEBUG_PRINT_EVENTID: + ath10k_wmi_event_debug_print(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_2_PDEV_QVIT_EVENTID: + ath10k_wmi_event_pdev_qvit(ar, skb); + break; + case WMI_10_2_WLAN_PROFILE_DATA_EVENTID: + ath10k_wmi_event_wlan_profile_data(ar, skb); + break; + case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_rtt_measurement_report(ar, skb); + break; + case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_tsf_measurement_report(ar, skb); + break; + case WMI_10_2_RTT_ERROR_REPORT_EVENTID: + ath10k_wmi_event_rtt_error_report(ar, skb); + break; + case WMI_10_2_WOW_WAKEUP_HOST_EVENTID: + ath10k_wmi_event_wow_wakeup_host(ar, skb); + break; + case WMI_10_2_DCS_INTERFERENCE_EVENTID: + ath10k_wmi_event_dcs_interference(ar, skb); + break; + case WMI_10_2_PDEV_TPC_CONFIG_EVENTID: + ath10k_wmi_event_pdev_tpc_config(ar, skb); + break; + case WMI_10_2_INST_RSSI_STATS_EVENTID: + ath10k_wmi_event_inst_rssi_stats(ar, skb); + break; + case WMI_10_2_VDEV_STANDBY_REQ_EVENTID: + ath10k_wmi_event_vdev_standby_req(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_2_VDEV_RESUME_REQ_EVENTID: + 
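/* Illustrative sketch, not from this patch: the UTF/testmode gating used
 * by the 10.x dispatchers above.  Testmode may swallow any event except
 * READY, which must still reach the normal handler so the driver learns
 * that the UTF firmware booted. */
#include <stdbool.h>

static bool route_to_normal_path(bool consumed_by_testmode, unsigned int id,
				 unsigned int ready_id)
{
	return !consumed_by_testmode || id == ready_id;
}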
ath10k_wmi_event_vdev_resume_req(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_2_SERVICE_READY_EVENTID: + ath10k_wmi_event_service_ready(ar, skb); return; + case WMI_10_2_READY_EVENTID: + ath10k_wmi_event_ready(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_2_PDEV_TEMPERATURE_EVENTID: + ath10k_wmi_event_temperature(ar, skb); + break; + case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID: + ath10k_wmi_event_pdev_bss_chan_info(ar, skb); + break; + case WMI_10_2_RTT_KEEPALIVE_EVENTID: + case WMI_10_2_GPIO_INPUT_EVENTID: + case WMI_10_2_PEER_RATECODE_LIST_EVENTID: + case WMI_10_2_GENERIC_BUFFER_EVENTID: + case WMI_10_2_MCAST_BUF_RELEASE_EVENTID: + case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID: + case WMI_10_2_WDS_PEER_EVENTID: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "received event id %d not implemented\n", id); + break; + case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID: + ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb); + break; default: + ath10k_warn(ar, "Unknown eventid: %d\n", id); break; } - skb_queue_tail(&ar->wmi.wmi_event_list, skb); - queue_work(ar->workqueue, &ar->wmi.wmi_event_work); +out: + dev_kfree_skb(skb); } -/* WMI Initialization functions */ -int ath10k_wmi_attach(struct ath10k *ar) +static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) { - init_completion(&ar->wmi.service_ready); - init_completion(&ar->wmi.unified_ready); - init_waitqueue_head(&ar->wmi.wq); + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_10_4_event_id id; + bool consumed; - skb_queue_head_init(&ar->wmi.wmi_event_list); - INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work); + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); - return 0; + if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr))) + goto out; + + trace_ath10k_wmi_event(ar, id, skb->data, skb->len); + + consumed = ath10k_tm_event_wmi(ar, id, skb); + + /* Ready event must be handled normally also in UTF mode so that we + * know the UTF firmware has booted, others we are just bypass WMI + * events to testmode. + */ + if (consumed && id != WMI_10_4_READY_EVENTID) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi testmode consumed 0x%x\n", id); + goto out; + } + + switch (id) { + case WMI_10_4_MGMT_RX_EVENTID: + ath10k_wmi_event_mgmt_rx(ar, skb); + /* mgmt_rx() owns the skb now! 
*/ + return; + case WMI_10_4_ECHO_EVENTID: + ath10k_wmi_event_echo(ar, skb); + break; + case WMI_10_4_DEBUG_MESG_EVENTID: + ath10k_wmi_event_debug_mesg(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_4_SERVICE_READY_EVENTID: + ath10k_wmi_event_service_ready(ar, skb); + return; + case WMI_10_4_SCAN_EVENTID: + ath10k_wmi_event_scan(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_4_CHAN_INFO_EVENTID: + ath10k_wmi_event_chan_info(ar, skb); + break; + case WMI_10_4_PHYERR_EVENTID: + ath10k_wmi_event_phyerr(ar, skb); + break; + case WMI_10_4_READY_EVENTID: + ath10k_wmi_event_ready(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_4_PEER_STA_KICKOUT_EVENTID: + ath10k_wmi_event_peer_sta_kickout(ar, skb); + break; + case WMI_10_4_ROAM_EVENTID: + ath10k_wmi_event_roam(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_4_HOST_SWBA_EVENTID: + ath10k_wmi_event_host_swba(ar, skb); + break; + case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID: + ath10k_wmi_event_tbttoffset_update(ar, skb); + break; + case WMI_10_4_DEBUG_PRINT_EVENTID: + ath10k_wmi_event_debug_print(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_4_VDEV_START_RESP_EVENTID: + ath10k_wmi_event_vdev_start_resp(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_4_VDEV_STOPPED_EVENTID: + ath10k_wmi_event_vdev_stopped(ar, skb); + ath10k_wmi_queue_set_coverage_class_work(ar); + break; + case WMI_10_4_WOW_WAKEUP_HOST_EVENTID: + case WMI_10_4_PEER_RATECODE_LIST_EVENTID: + case WMI_10_4_WDS_PEER_EVENTID: + case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "received event id %d not implemented\n", id); + break; + case WMI_10_4_UPDATE_STATS_EVENTID: + ath10k_wmi_event_update_stats(ar, skb); + break; + case WMI_10_4_PDEV_TEMPERATURE_EVENTID: + ath10k_wmi_event_temperature(ar, skb); + break; + case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID: + ath10k_wmi_event_pdev_bss_chan_info(ar, skb); + break; + case WMI_10_4_PDEV_TPC_CONFIG_EVENTID: + ath10k_wmi_event_pdev_tpc_config(ar, skb); + break; + case WMI_10_4_TDLS_PEER_EVENTID: + ath10k_wmi_handle_tdls_peer_event(ar, skb); + break; + case WMI_10_4_PDEV_TPC_TABLE_EVENTID: + ath10k_wmi_event_tpc_final_table(ar, skb); + break; + case WMI_10_4_DFS_STATUS_CHECK_EVENTID: + ath10k_wmi_event_dfs_status_check(ar, skb); + break; + case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID: + ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb); + break; + default: + ath10k_warn(ar, "Unknown eventid: %d\n", id); + break; + } + +out: + dev_kfree_skb(skb); } -void ath10k_wmi_detach(struct ath10k *ar) +static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) { - /* HTC should've drained the packets already */ - if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0)) - ath10k_warn("there are still pending packets\n"); + int ret; - cancel_work_sync(&ar->wmi.wmi_event_work); - skb_queue_purge(&ar->wmi.wmi_event_list); + ret = ath10k_wmi_rx(ar, skb); + if (ret) + ath10k_warn(ar, "failed to process wmi rx: %d\n", ret); } -int ath10k_wmi_connect_htc_service(struct ath10k *ar) +int ath10k_wmi_connect(struct ath10k *ar) { int status; struct ath10k_htc_svc_conn_req conn_req; struct ath10k_htc_svc_conn_resp conn_resp; + memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map)); + memset(&conn_req, 0, sizeof(conn_req)); memset(&conn_resp, 0, sizeof(conn_resp)); /* these fields are the same for all service endpoints */ 
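/* Illustrative sketch, not from this patch: the buffer-ownership rule in
 * the op_rx switches above.  Cases whose handler keeps the skb (mgmt rx,
 * service ready) `return` before the shared free; everything else
 * `break`s into the single dev_kfree_skb() at the bottom.  Names and
 * types here are made-up stand-ins. */
#include <stdlib.h>

struct pkt { int id; };

static void handler_takes(struct pkt *p) { free(p); }	/* owns the buffer */
static void handler_borrows(struct pkt *p) { (void)p; }

static void dispatch(struct pkt *p)
{
	switch (p->id) {
	case 0:				/* e.g. MGMT_RX / SERVICE_READY */
		handler_takes(p);
		return;			/* skip the common free below */
	default:
		handler_borrows(p);
		break;
	}
	free(p);			/* one free path for all borrowers */
}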
conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete; conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx; + conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits; /* connect to control service */ conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; - status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp); + status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); if (status) { - ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", + ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n", status); return status; } @@ -1125,15 +6440,36 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar) return 0; } -int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, - u16 rd5g, u16 ctl2g, u16 ctl5g) +static struct sk_buff * +ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar, + const u8 macaddr[ETH_ALEN]) +{ + struct wmi_pdev_set_base_macaddr_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data; + ether_addr_copy(cmd->mac_addr.addr, macaddr); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev basemac %pM\n", macaddr); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g, + u16 ctl2g, u16 ctl5g, + enum wmi_dfs_region dfs_reg) { struct wmi_pdev_set_regdomain_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; cmd->reg_domain = __cpu_to_le32(rd); @@ -1142,89 +6478,114 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", rd, rd2g, rd5g, ctl2g, ctl5g); - - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); + return skb; } -int ath10k_wmi_pdev_set_channel(struct ath10k *ar, - const struct wmi_channel_arg *arg) +static struct sk_buff * +ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 + rd5g, u16 ctl2g, u16 ctl5g, + enum wmi_dfs_region dfs_reg) { - struct wmi_set_channel_cmd *cmd; + struct wmi_pdev_set_regdomain_cmd_10x *cmd; struct sk_buff *skb; - if (arg->passive) - return -EINVAL; - - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; - - cmd = (struct wmi_set_channel_cmd *)skb->data; - cmd->chan.mhz = __cpu_to_le32(arg->freq); - cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq); - cmd->chan.mode = arg->mode; - cmd->chan.min_power = arg->min_power; - cmd->chan.max_power = arg->max_power; - cmd->chan.reg_power = arg->max_reg_power; - cmd->chan.reg_classid = arg->reg_class_id; - cmd->chan.antenna_max = arg->max_antenna_gain; + return ERR_PTR(-ENOMEM); - ath10k_dbg(ATH10K_DBG_WMI, - "wmi set channel mode %d freq %d\n", - arg->mode, arg->freq); + cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data; + cmd->reg_domain = __cpu_to_le32(rd); + cmd->reg_domain_2G = __cpu_to_le32(rd2g); + cmd->reg_domain_5G = __cpu_to_le32(rd5g); + cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); + cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); + cmd->dfs_domain = 
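/* Illustrative sketch, not from this patch: the gen/send split these
 * hunks perform.  Builders now return a buffer (or an error pointer)
 * instead of calling cmd_send themselves, so one transport path can
 * serve every firmware flavour.  All names below are hypothetical. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct buf { size_t len; uint8_t data[]; };

static struct buf *gen_pdev_set_param(uint32_t id, uint32_t value)
{
	struct buf *b = malloc(sizeof(*b) + 2 * sizeof(uint32_t));

	if (!b)
		return NULL;		/* kernel code returns ERR_PTR(-ENOMEM) */
	b->len = 2 * sizeof(uint32_t);
	memcpy(b->data, &id, sizeof(id));		/* __cpu_to_le32 omitted */
	memcpy(b->data + 4, &value, sizeof(value));
	return b;		/* caller picks the right CMDID and transmits */
}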
__cpu_to_le32(dfs_reg); - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID); + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n", + rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg); + return skb; } -int ath10k_wmi_pdev_suspend_target(struct ath10k *ar) +static struct sk_buff * +ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt) { struct wmi_pdev_suspend_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_pdev_suspend_cmd *)skb->data; - cmd->suspend_opt = WMI_PDEV_SUSPEND; + cmd->suspend_opt = __cpu_to_le32(suspend_opt); - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID); + return skb; } -int ath10k_wmi_pdev_resume_target(struct ath10k *ar) +static struct sk_buff * +ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar) { struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(0); - if (skb == NULL) - return -ENOMEM; + skb = ath10k_wmi_alloc_skb(ar, 0); + if (!skb) + return ERR_PTR(-ENOMEM); - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID); + return skb; } -int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, - u32 value) +static struct sk_buff * +ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value) { struct wmi_pdev_set_param_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (id == WMI_PDEV_PARAM_UNSUPPORTED) { + ath10k_warn(ar, "pdev param %d not supported by firmware\n", + id); + return ERR_PTR(-EOPNOTSUPP); + } + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_pdev_set_param_cmd *)skb->data; cmd->param_id = __cpu_to_le32(id); cmd->param_value = __cpu_to_le32(value); - ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", id, value); - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID); + return skb; } -int ath10k_wmi_cmd_init(struct ath10k *ar) +void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar, + struct wmi_host_mem_chunks *chunks) +{ + struct host_memory_chunk *chunk; + int i; + + chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks); + + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + chunk = &chunks->items[i]; + chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); + chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); + chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi chunk %d len %d requested, addr 0x%llx\n", + i, + ar->wmi.mem_chunks[i].len, + (unsigned long long)ar->wmi.mem_chunks[i].paddr); + } +} + +static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar) { struct wmi_init_cmd *cmd; struct sk_buff *buf; @@ -1232,7 +6593,7 @@ int ath10k_wmi_cmd_init(struct ath10k *ar) u32 val; config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS); - config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS); + config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS); config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS); config.num_offload_reorder_bufs = @@ -1247,8 +6608,7 @@ int ath10k_wmi_cmd_init(struct ath10k *ar) config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI); - config.rx_decap_mode = 
__cpu_to_le32(TARGET_RX_DECAP_MODE); - + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); config.scan_max_pending_reqs = __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS); @@ -1282,60 +6642,282 @@ int ath10k_wmi_cmd_init(struct ath10k *ar) config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC); config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES); - buf = ath10k_wmi_alloc_skb(sizeof(*cmd)); + buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items, + ar->wmi.num_mem_chunks)); if (!buf) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_init_cmd *)buf->data; - cmd->num_host_mem_chunks = 0; + memcpy(&cmd->resource_config, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); - ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); - return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n"); + return buf; } -static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg) +static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar) { - int len; + struct wmi_init_cmd_10x *cmd; + struct sk_buff *buf; + struct wmi_resource_config_10x config = {}; + u32 val; + + config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); + config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); + config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); + config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); + config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); + config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); + config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); + config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); + config.scan_max_pending_reqs = + __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); - len = sizeof(struct wmi_start_scan_cmd); + config.bmiss_offload_max_vdev = + __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); - if (arg->ie_len) { - if (!arg->ie) - return -EINVAL; - if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) - return -EINVAL; + config.roam_offload_max_vdev = + __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_ap_profiles = + __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); + + config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); + config.num_mcast_table_elems = + __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); + + config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); + config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); + config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); + config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); + config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); + + val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; + config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); + + config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); + + config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); + config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); + + buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items, + ar->wmi.num_mem_chunks)); + if (!buf) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_init_cmd_10x *)buf->data; + + 
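/* Illustrative sketch, not from this patch: the struct_size() sizing the
 * init builders above use for their allocation.  The command is a fixed
 * header plus a flexible array of host memory chunk records; the kernel
 * helper additionally guards against multiplication overflow.  The struct
 * layout here is a simplified stand-in. */
#include <stddef.h>
#include <stdint.h>

struct chunk { uint32_t ptr, size, req_id; };
struct init_cmd {
	uint32_t resource_config[4];	/* stand-in for the real config */
	uint32_t num_host_mem_chunks;
	struct chunk items[];		/* flexible array member */
};

static size_t init_cmd_size(size_t num_chunks)
{
	/* roughly struct_size(cmd, mem_chunks.items, num_chunks) */
	return sizeof(struct init_cmd) + num_chunks * sizeof(struct chunk);
}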
memcpy(&cmd->resource_config, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n"); + return buf; +} + +static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) +{ + struct wmi_init_cmd_10_2 *cmd; + struct sk_buff *buf; + struct wmi_resource_config_10x config = {}; + u32 val, features; + + config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); + config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); + + if (ath10k_peer_stats_enabled(ar)) { + config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS); + config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS); + } else { + config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); + config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); + } + + config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); + config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); + config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); + config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); + + config.scan_max_pending_reqs = + __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); + + config.bmiss_offload_max_vdev = + __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_vdev = + __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_ap_profiles = + __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); + + config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); + config.num_mcast_table_elems = + __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); + + config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); + config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); + config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); + config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE); + config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); + + val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; + config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); + + config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); + + config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); + config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); + + buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items, + ar->wmi.num_mem_chunks)); + if (!buf) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_init_cmd_10_2 *)buf->data; + + features = WMI_10_2_RX_BATCH_MODE; + + if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) && + test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) + features |= WMI_10_2_COEX_GPIO; + + if (ath10k_peer_stats_enabled(ar)) + features |= WMI_10_2_PEER_STATS; + + if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) + features |= WMI_10_2_BSS_CHAN_INFO; + + cmd->resource_config.feature_mask = __cpu_to_le32(features); + + memcpy(&cmd->resource_config.common, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n"); + return buf; +} + +static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar) +{ + struct wmi_init_cmd_10_4 *cmd; + struct sk_buff *buf; + struct wmi_resource_config_10_4 
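/* Illustrative sketch, not from this patch: the 10.2 feature-mask
 * negotiation above.  Host capabilities are ORed into one 32-bit word,
 * and conditional bits are set only when both the host-side flag and the
 * firmware-advertised service bit agree.  Bit positions are invented for
 * the example. */
#include <stdbool.h>
#include <stdint.h>

#define F_RX_BATCH_MODE	(1u << 0)
#define F_COEX_GPIO	(1u << 1)
#define F_PEER_STATS	(1u << 2)
#define F_BSS_CHAN_INFO	(1u << 3)

static uint32_t build_features(bool btcoex_flag, bool coex_gpio_svc,
			       bool peer_stats, bool bss_chan_info_svc)
{
	uint32_t f = F_RX_BATCH_MODE;		/* always requested */

	if (btcoex_flag && coex_gpio_svc)
		f |= F_COEX_GPIO;		/* needs host flag AND service */
	if (peer_stats)
		f |= F_PEER_STATS;
	if (bss_chan_info_svc)
		f |= F_BSS_CHAN_INFO;
	return f;
}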
config = {}; + + config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs); + config.num_peers = __cpu_to_le32(ar->max_num_peers); + config.num_active_peers = __cpu_to_le32(ar->num_active_peers); + config.num_tids = __cpu_to_le32(ar->num_tids); + + config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS); + config.num_offload_reorder_buffs = + __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS); + config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS); + config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT); + config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask); + config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask); + + config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI); + + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); + config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS); + config.bmiss_offload_max_vdev = + __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV); + config.roam_offload_max_vdev = + __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV); + config.roam_offload_max_ap_profiles = + __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES); + config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS); + config.num_mcast_table_elems = + __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS); + + config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE); + config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE); + config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES); + config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE); + config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM); + + config.rx_skip_defrag_timeout_dup_detection_check = + __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK); + + config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG); + config.gtk_offload_max_vdev = + __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV); + config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx); + config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS); + config.max_peer_ext_stats = + __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS); + config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP); + + config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE); + config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE); + config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE); + config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE); + + config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE); + config.tt_support = + __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG); + config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG); + config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG); + config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG); + + buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items, + ar->wmi.num_mem_chunks)); + if (!buf) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_init_cmd_10_4 *)buf->data; + memcpy(&cmd->resource_config, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n"); + return buf; +} + +int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg) +{ + if (arg->ie_len > 
WLAN_SCAN_PARAMS_MAX_IE_LEN) + return -EINVAL; + if (arg->n_channels > ARRAY_SIZE(arg->channels)) + return -EINVAL; + if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) + return -EINVAL; + if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID) + return -EINVAL; + + return 0; +} +static size_t +ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg) +{ + int len = 0; + + if (arg->ie_len) { len += sizeof(struct wmi_ie_data); len += roundup(arg->ie_len, 4); } if (arg->n_channels) { - if (!arg->channels) - return -EINVAL; - if (arg->n_channels > ARRAY_SIZE(arg->channels)) - return -EINVAL; - len += sizeof(struct wmi_chan_list); len += sizeof(__le32) * arg->n_channels; } if (arg->n_ssids) { - if (!arg->ssids) - return -EINVAL; - if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) - return -EINVAL; - len += sizeof(struct wmi_ssid_list); len += sizeof(struct wmi_ssid) * arg->n_ssids; } if (arg->n_bssids) { - if (!arg->bssids) - return -EINVAL; - if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID) - return -EINVAL; - len += sizeof(struct wmi_bssid_list); len += sizeof(struct wmi_mac_addr) * arg->n_bssids; } @@ -1343,28 +6925,11 @@ static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg) return len; } -int ath10k_wmi_start_scan(struct ath10k *ar, - const struct wmi_start_scan_arg *arg) +void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn, + const struct wmi_start_scan_arg *arg) { - struct wmi_start_scan_cmd *cmd; - struct sk_buff *skb; - struct wmi_ie_data *ie; - struct wmi_chan_list *channels; - struct wmi_ssid_list *ssids; - struct wmi_bssid_list *bssids; u32 scan_id; u32 scan_req_id; - int off; - int len = 0; - int i; - - len = ath10k_wmi_start_scan_calc_len(arg); - if (len < 0) - return len; /* len contains error code here */ - - skb = ath10k_wmi_alloc_skb(len); - if (!skb) - return -ENOMEM; scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX; scan_id |= arg->scan_id; @@ -1372,41 +6937,49 @@ int ath10k_wmi_start_scan(struct ath10k *ar, scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; scan_req_id |= arg->scan_req_id; - cmd = (struct wmi_start_scan_cmd *)skb->data; - cmd->scan_id = __cpu_to_le32(scan_id); - cmd->scan_req_id = __cpu_to_le32(scan_req_id); - cmd->vdev_id = __cpu_to_le32(arg->vdev_id); - cmd->scan_priority = __cpu_to_le32(arg->scan_priority); - cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events); - cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active); - cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive); - cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time); - cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time); - cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time); - cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time); - cmd->idle_time = __cpu_to_le32(arg->idle_time); - cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time); - cmd->probe_delay = __cpu_to_le32(arg->probe_delay); - cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); - - /* TLV list starts after fields included in the struct */ - off = sizeof(*cmd); + cmn->scan_id = __cpu_to_le32(scan_id); + cmn->scan_req_id = __cpu_to_le32(scan_req_id); + cmn->vdev_id = __cpu_to_le32(arg->vdev_id); + cmn->scan_priority = __cpu_to_le32(arg->scan_priority); + cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events); + cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active); + cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive); + cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time); + cmn->max_rest_time = 
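/* Illustrative sketch, not from this patch: the scan TLV sizing computed
 * above.  Each optional section costs a tag/count header plus its
 * payload, with the IE blob rounded up to a 4-byte boundary.  The element
 * sizes below are illustrative constants, not the driver's definitions. */
#include <stddef.h>
#include <stdint.h>

#define TLV_HDR_LEN	8	/* 32-bit tag + 32-bit count */
#define SSID_ENTRY_LEN	36	/* 32-bit length + 32-byte ssid */
#define BSSID_ENTRY_LEN	8	/* padded MAC address */

static size_t roundup4(size_t x) { return (x + 3) & ~(size_t)3; }

static size_t scan_tlvs_len(size_t ie_len, size_t n_chans,
			    size_t n_ssids, size_t n_bssids)
{
	size_t len = 0;

	if (ie_len)
		len += TLV_HDR_LEN + roundup4(ie_len);
	if (n_chans)
		len += TLV_HDR_LEN + sizeof(uint32_t) * n_chans;
	if (n_ssids)
		len += TLV_HDR_LEN + SSID_ENTRY_LEN * n_ssids;
	if (n_bssids)
		len += TLV_HDR_LEN + BSSID_ENTRY_LEN * n_bssids;
	return len;
}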
__cpu_to_le32(arg->max_rest_time); + cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time); + cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time); + cmn->idle_time = __cpu_to_le32(arg->idle_time); + cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time); + cmn->probe_delay = __cpu_to_le32(arg->probe_delay); + cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); +} + +static void +ath10k_wmi_put_start_scan_tlvs(u8 *tlvs, + const struct wmi_start_scan_arg *arg) +{ + struct wmi_ie_data *ie; + struct wmi_chan_list *channels; + struct wmi_ssid_list *ssids; + struct wmi_bssid_list *bssids; + void *ptr = tlvs; + int i; if (arg->n_channels) { - channels = (void *)skb->data + off; + channels = ptr; channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG); channels->num_chan = __cpu_to_le32(arg->n_channels); for (i = 0; i < arg->n_channels; i++) - channels->channel_list[i] = - __cpu_to_le32(arg->channels[i]); + channels->channel_list[i].freq = + __cpu_to_le16(arg->channels[i]); - off += sizeof(*channels); - off += sizeof(__le32) * arg->n_channels; + ptr += sizeof(*channels); + ptr += sizeof(__le32) * arg->n_channels; } if (arg->n_ssids) { - ssids = (void *)skb->data + off; + ssids = ptr; ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG); ssids->num_ssids = __cpu_to_le32(arg->n_ssids); @@ -1418,41 +6991,88 @@ int ath10k_wmi_start_scan(struct ath10k *ar, arg->ssids[i].len); } - off += sizeof(*ssids); - off += sizeof(struct wmi_ssid) * arg->n_ssids; + ptr += sizeof(*ssids); + ptr += sizeof(struct wmi_ssid) * arg->n_ssids; } if (arg->n_bssids) { - bssids = (void *)skb->data + off; + bssids = ptr; bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG); bssids->num_bssid = __cpu_to_le32(arg->n_bssids); for (i = 0; i < arg->n_bssids; i++) - memcpy(&bssids->bssid_list[i], - arg->bssids[i].bssid, - ETH_ALEN); + ether_addr_copy(bssids->bssid_list[i].addr, + arg->bssids[i].bssid); - off += sizeof(*bssids); - off += sizeof(struct wmi_mac_addr) * arg->n_bssids; + ptr += sizeof(*bssids); + ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids; } if (arg->ie_len) { - ie = (void *)skb->data + off; + ie = ptr; ie->tag = __cpu_to_le32(WMI_IE_TAG); ie->ie_len = __cpu_to_le32(arg->ie_len); memcpy(ie->ie_data, arg->ie, arg->ie_len); - off += sizeof(*ie); - off += roundup(arg->ie_len, 4); + ptr += sizeof(*ie); + ptr += roundup(arg->ie_len, 4); } +} - if (off != skb->len) { - dev_kfree_skb(skb); - return -EINVAL; - } +static struct sk_buff * +ath10k_wmi_op_gen_start_scan(struct ath10k *ar, + const struct wmi_start_scan_arg *arg) +{ + struct wmi_start_scan_cmd *cmd; + struct sk_buff *skb; + size_t len; + int ret; - ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n"); - return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID); + ret = ath10k_wmi_start_scan_verify(arg); + if (ret) + return ERR_PTR(ret); + + len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_start_scan_cmd *)skb->data; + + ath10k_wmi_put_start_scan_common(&cmd->common, arg); + ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg); + + cmd->burst_duration_ms = __cpu_to_le32(0); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar, + const struct wmi_start_scan_arg *arg) +{ + struct wmi_10x_start_scan_cmd *cmd; + struct sk_buff *skb; + size_t len; + int ret; + + ret = ath10k_wmi_start_scan_verify(arg); + if (ret) + return ERR_PTR(ret); + + len = sizeof(*cmd) + 
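/* Illustrative sketch, not from this patch: the cursor-style TLV writer
 * above.  Each section is emitted at `ptr`, which then advances by header
 * plus payload.  Note the hunk stores a 16-bit frequency per channel but
 * still strides 32 bits per entry; the tag value here is a placeholder. */
#include <stdint.h>
#include <string.h>

static uint8_t *put_chan_list(uint8_t *ptr, const uint16_t *freqs, uint32_t n)
{
	const uint32_t tag = 1;		/* stand-in for WMI_CHAN_LIST_TAG */
	uint32_t i;

	memcpy(ptr, &tag, 4);
	memcpy(ptr + 4, &n, 4);
	ptr += 8;

	for (i = 0; i < n; i++)		/* 16-bit freq in a 32-bit slot */
		memcpy(ptr + i * 4, &freqs[i], sizeof(freqs[i]));

	return ptr + n * 4;		/* next TLV starts here */
}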
ath10k_wmi_start_scan_tlvs_len(arg); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_10x_start_scan_cmd *)skb->data; + + ath10k_wmi_put_start_scan_common(&cmd->common, arg); + ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n"); + return skb; } void ath10k_wmi_start_scan_init(struct ath10k *ar, @@ -1468,20 +7088,22 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar, arg->repeat_probe_time = 0; arg->probe_spacing_time = 0; arg->idle_time = 0; - arg->max_scan_time = 5000; + arg->max_scan_time = 20000; arg->probe_delay = 5; arg->notify_scan_events = WMI_SCAN_EVENT_STARTED | WMI_SCAN_EVENT_COMPLETED | WMI_SCAN_EVENT_BSS_CHANNEL | WMI_SCAN_EVENT_FOREIGN_CHANNEL + | WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT | WMI_SCAN_EVENT_DEQUEUED; - arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; arg->n_bssids = 1; arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; } -int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) +static struct sk_buff * +ath10k_wmi_op_gen_stop_scan(struct ath10k *ar, + const struct wmi_stop_scan_arg *arg) { struct wmi_stop_scan_cmd *cmd; struct sk_buff *skb; @@ -1489,13 +7111,13 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) u32 req_id; if (arg->req_id > 0xFFF) - return -EINVAL; + return ERR_PTR(-EINVAL); if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) - return -EINVAL; + return ERR_PTR(-EINVAL); - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); scan_id = arg->u.scan_id; scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX; @@ -1509,84 +7131,78 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) cmd->scan_id = __cpu_to_le32(scan_id); cmd->scan_req_id = __cpu_to_le32(req_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", arg->req_id, arg->req_type, arg->u.scan_id); - return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID); + return skb; } -int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, - enum wmi_vdev_type type, - enum wmi_vdev_subtype subtype, - const u8 macaddr[ETH_ALEN]) +static struct sk_buff * +ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id, + enum wmi_vdev_type type, + enum wmi_vdev_subtype subtype, + const u8 macaddr[ETH_ALEN]) { struct wmi_vdev_create_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_vdev_create_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->vdev_type = __cpu_to_le32(type); cmd->vdev_subtype = __cpu_to_le32(subtype); - memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN); + ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", vdev_id, type, subtype, macaddr); - - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID); + return skb; } -int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) +static struct sk_buff * +ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id) { struct wmi_vdev_delete_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return 
-ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_vdev_delete_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id); - - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID); + return skb; } -static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, - const struct wmi_vdev_start_request_arg *arg, - enum wmi_cmd_id cmd_id) +static struct sk_buff * +ath10k_wmi_op_gen_vdev_start(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *arg, + bool restart) { struct wmi_vdev_start_request_cmd *cmd; struct sk_buff *skb; const char *cmdname; u32 flags = 0; - if (cmd_id != WMI_VDEV_START_REQUEST_CMDID && - cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID) - return -EINVAL; - if (WARN_ON(arg->ssid && arg->ssid_len == 0)) - return -EINVAL; if (WARN_ON(arg->hidden_ssid && !arg->ssid)) - return -EINVAL; + return ERR_PTR(-EINVAL); if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) - return -EINVAL; + return ERR_PTR(-EINVAL); - if (cmd_id == WMI_VDEV_START_REQUEST_CMDID) - cmdname = "start"; - else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID) + if (restart) cmdname = "restart"; else - return -EINVAL; /* should not happen, we already check cmd_id */ + cmdname = "start"; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); if (arg->hidden_ssid) flags |= WMI_VDEV_START_HIDDEN_SSID; @@ -1607,132 +7223,118 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); } - cmd->chan.mhz = __cpu_to_le32(arg->channel.freq); + ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel); - cmd->chan.band_center_freq1 = - __cpu_to_le32(arg->channel.band_center_freq1); + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n", + cmdname, arg->vdev_id, + flags, arg->channel.freq, arg->channel.mode, + cmd->chan.flags, arg->channel.max_power); - cmd->chan.mode = arg->channel.mode; - cmd->chan.min_power = arg->channel.min_power; - cmd->chan.max_power = arg->channel.max_power; - cmd->chan.reg_power = arg->channel.max_reg_power; - cmd->chan.reg_classid = arg->channel.reg_class_id; - cmd->chan.antenna_max = arg->channel.max_antenna_gain; - - ath10k_dbg(ATH10K_DBG_WMI, - "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X," - "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq, - arg->channel.mode, flags, arg->channel.max_power); - - return ath10k_wmi_cmd_send(ar, skb, cmd_id); -} - -int ath10k_wmi_vdev_start(struct ath10k *ar, - const struct wmi_vdev_start_request_arg *arg) -{ - return ath10k_wmi_vdev_start_restart(ar, arg, - WMI_VDEV_START_REQUEST_CMDID); -} - -int ath10k_wmi_vdev_restart(struct ath10k *ar, - const struct wmi_vdev_start_request_arg *arg) -{ - return ath10k_wmi_vdev_start_restart(ar, arg, - WMI_VDEV_RESTART_REQUEST_CMDID); + return skb; } -int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) +static struct sk_buff * +ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id) { struct wmi_vdev_stop_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_vdev_stop_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); - - return ath10k_wmi_cmd_send(ar, skb, 
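/* Illustrative sketch, not from this patch: the ERR_PTR convention the
 * builders above switch to.  The returned pointer itself encodes the
 * errno, so callers test IS_ERR() instead of threading a separate status
 * value alongside the buffer.  This is a userspace re-derivation of the
 * kernel helpers, assuming an LP64-style pointer/long match. */
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *err_ptr(long err) { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* usage: skb = gen_vdev_up(...); if (is_err(skb)) return ptr_err(skb); */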
WMI_VDEV_STOP_CMDID); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); + return skb; } -int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) +static struct sk_buff * +ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, + const u8 *bssid) { struct wmi_vdev_up_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_vdev_up_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->vdev_assoc_id = __cpu_to_le32(aid); - memcpy(&cmd->vdev_bssid.addr, bssid, 6); + ether_addr_copy(cmd->vdev_bssid.addr, bssid); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", vdev_id, aid, bssid); - - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID); + return skb; } -int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) +static struct sk_buff * +ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id) { struct wmi_vdev_down_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_vdev_down_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt vdev down id 0x%x\n", vdev_id); - - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID); + return skb; } -int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, - enum wmi_vdev_param param_id, u32 param_value) +static struct sk_buff * +ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id, + u32 param_id, u32 param_value) { struct wmi_vdev_set_param_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "vdev param %d not supported by firmware\n", + param_id); + return ERR_PTR(-EOPNOTSUPP); + } + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_vdev_set_param_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(param_value); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev id 0x%x set param %d value %d\n", vdev_id, param_id, param_value); - - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID); + return skb; } -int ath10k_wmi_vdev_install_key(struct ath10k *ar, - const struct wmi_vdev_install_key_arg *arg) +static struct sk_buff * +ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar, + const struct wmi_vdev_install_key_arg *arg) { struct wmi_vdev_install_key_cmd *cmd; struct sk_buff *skb; if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL) - return -EINVAL; + return ERR_PTR(-EINVAL); if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) - return -EINVAL; + return ERR_PTR(-EINVAL); - skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_vdev_install_key_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(arg->vdev_id); @@ -1744,238 +7346,307 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar, cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); if (arg->macaddr) - memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN); + 
ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); if (arg->key_data) memcpy(cmd->key_data, arg->key_data, arg->key_len); - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID); + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi vdev install key idx %d cipher %d len %d\n", + arg->key_idx, arg->key_cipher, arg->key_len); + return skb; } -int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, - const u8 peer_addr[ETH_ALEN]) +static struct sk_buff * +ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar, + const struct wmi_vdev_spectral_conf_arg *arg) +{ + struct wmi_vdev_spectral_conf_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->scan_count = __cpu_to_le32(arg->scan_count); + cmd->scan_period = __cpu_to_le32(arg->scan_period); + cmd->scan_priority = __cpu_to_le32(arg->scan_priority); + cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size); + cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena); + cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena); + cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref); + cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay); + cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr); + cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr); + cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode); + cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode); + cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr); + cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format); + cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode); + cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale); + cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj); + cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask); + + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, + u32 trigger, u32 enable) +{ + struct wmi_vdev_spectral_enable_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->trigger_cmd = __cpu_to_le32(trigger); + cmd->enable_cmd = __cpu_to_le32(enable); + + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], + enum wmi_peer_type peer_type) { struct wmi_peer_create_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_peer_create_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->peer_type = __cpu_to_le32(peer_type); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer create vdev_id %d peer_addr %pM\n", vdev_id, peer_addr); - return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID); + return skb; } -int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, - const u8 peer_addr[ETH_ALEN]) +static struct sk_buff * +ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN]) { struct wmi_peer_delete_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = 
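[Reviewer note] The memcpy()-to-ether_addr_copy() conversions in these vdev/peer commands are not purely cosmetic: ether_addr_copy() assumes both MAC buffers are at least 2-byte aligned (assumed to hold for the wmi_mac_addr fields here) and compiles to word copies. A simplified model under that alignment assumption — the real kernel helper also has an unaligned-access variant:

static inline void example_mac_copy(u8 *dst, const u8 *src)
{
	u16 *d = (u16 *)dst;
	const u16 *s = (const u16 *)src;

	/* three halfword stores instead of a byte-wise memcpy */
	d[0] = s[0];
	d[1] = s[1];
	d[2] = s[2];
}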
ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_peer_delete_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer delete vdev_id %d peer_addr %pM\n", vdev_id, peer_addr); - return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID); + return skb; } -int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, - const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) +static struct sk_buff * +ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) { struct wmi_peer_flush_tids_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); - memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", vdev_id, peer_addr, tid_bitmap); - return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID); + return skb; } -int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, - const u8 *peer_addr, enum wmi_peer_param param_id, - u32 param_value) +static struct sk_buff * +ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id, + const u8 *peer_addr, + enum wmi_peer_param param_id, + u32 param_value) { struct wmi_peer_set_param_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_peer_set_param_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(param_value); - memcpy(&cmd->peer_macaddr.addr, peer_addr, 6); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev %d peer 0x%pM set param %d value %d\n", vdev_id, peer_addr, param_id, param_value); + return skb; +} + +static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar, + u32 gpio_num, u32 input, + u32 pull_type, u32 intr_mode) +{ + struct wmi_gpio_config_cmd *cmd; + struct sk_buff *skb; - return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_gpio_config_cmd *)skb->data; + cmd->pull_type = __cpu_to_le32(pull_type); + cmd->gpio_num = __cpu_to_le32(gpio_num); + cmd->input = __cpu_to_le32(input); + cmd->intr_mode = __cpu_to_le32(intr_mode); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n", + gpio_num, input, pull_type, intr_mode); + + return skb; } -int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, - enum wmi_sta_ps_mode psmode) +static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar, + u32 gpio_num, u32 set) +{ + struct wmi_gpio_output_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_gpio_output_cmd *)skb->data; + cmd->gpio_num = 
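[Reviewer note] The GPIO config/output generators in this hunk are what the LED support added by this series drives. A sketch of the intended call sequence; example_led_set() is hypothetical, and the ath10k_wmi_gpio_config()/ath10k_wmi_gpio_output() wrappers plus the WMI_GPIO_* constants are assumed from the companion headers:

static int example_led_set(struct ath10k *ar, u32 pin, bool on)
{
	int ret;

	/* pin as output: input=0, no pull, interrupts disabled */
	ret = ath10k_wmi_gpio_config(ar, pin, 0, WMI_GPIO_PULL_NONE,
				     WMI_GPIO_INTTYPE_DISABLE);
	if (ret)
		return ret;

	return ath10k_wmi_gpio_output(ar, pin, on);
}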
__cpu_to_le32(gpio_num); + cmd->set = __cpu_to_le32(set); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n", + gpio_num, set); + + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_ps_mode psmode) { struct wmi_sta_powersave_mode_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->sta_ps_mode = __cpu_to_le32(psmode); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi set powersave id 0x%x mode %d\n", vdev_id, psmode); - - return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID); + return skb; } -int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, - enum wmi_sta_powersave_param param_id, - u32 value) +static struct sk_buff * +ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_powersave_param param_id, + u32 value) { struct wmi_sta_powersave_param_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(value); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_STA, "wmi sta ps param vdev_id 0x%x param %d value %d\n", vdev_id, param_id, value); - return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID); + return skb; } -int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, - enum wmi_ap_ps_peer_param param_id, u32 value) +static struct sk_buff * +ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac, + enum wmi_ap_ps_peer_param param_id, u32 value) { struct wmi_ap_ps_peer_cmd *cmd; struct sk_buff *skb; if (!mac) - return -EINVAL; + return ERR_PTR(-EINVAL); - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(value); - memcpy(&cmd->peer_macaddr, mac, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, mac); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", vdev_id, param_id, value, mac); - - return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID); + return skb; } -int ath10k_wmi_scan_chan_list(struct ath10k *ar, - const struct wmi_scan_chan_list_arg *arg) +static struct sk_buff * +ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar, + const struct wmi_scan_chan_list_arg *arg) { struct wmi_scan_chan_list_cmd *cmd; struct sk_buff *skb; struct wmi_channel_arg *ch; struct wmi_channel *ci; - int len; int i; - len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel); - - skb = ath10k_wmi_alloc_skb(len); + skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels)); if (!skb) - return -EINVAL; + return ERR_PTR(-EINVAL); cmd = (struct wmi_scan_chan_list_cmd *)skb->data; cmd->num_scan_chans = __cpu_to_le32(arg->n_channels); for (i = 0; i < arg->n_channels; i++) { - u32 flags = 0; - ch = &arg->channels[i]; ci = 
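[Reviewer note] Channel serialization is now funneled through ath10k_wmi_put_wmi_channel(); the open-coded flag packing it replaces is visible in the removed lines just below, and the struct_size() allocation above makes the cmd-plus-array sizing overflow-safe. A simplified sketch of the centralized mapping (grounded in the removed code; the real helper additionally handles DFS and the power/regulatory fields):

static void example_put_channel(struct wmi_channel *ci,
				const struct wmi_channel_arg *ch)
{
	u32 flags = 0;

	if (ch->passive)
		flags |= WMI_CHAN_FLAG_PASSIVE;
	if (ch->allow_ibss)
		flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
	if (ch->allow_ht)
		flags |= WMI_CHAN_FLAG_ALLOW_HT;
	if (ch->allow_vht)
		flags |= WMI_CHAN_FLAG_ALLOW_VHT;
	if (ch->ht40plus)
		flags |= WMI_CHAN_FLAG_HT40_PLUS;

	ci->mhz = __cpu_to_le32(ch->freq);
	ci->band_center_freq1 = __cpu_to_le32(ch->band_center_freq1);
	ci->mode = ch->mode;	/* mode and flags share storage */
	ci->flags = __cpu_to_le32(flags);
}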
&cmd->chan_info[i]; - if (ch->passive) - flags |= WMI_CHAN_FLAG_PASSIVE; - if (ch->allow_ibss) - flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED; - if (ch->allow_ht) - flags |= WMI_CHAN_FLAG_ALLOW_HT; - if (ch->allow_vht) - flags |= WMI_CHAN_FLAG_ALLOW_VHT; - if (ch->ht40plus) - flags |= WMI_CHAN_FLAG_HT40_PLUS; - - ci->mhz = __cpu_to_le32(ch->freq); - ci->band_center_freq1 = __cpu_to_le32(ch->freq); - ci->band_center_freq2 = 0; - ci->min_power = ch->min_power; - ci->max_power = ch->max_power; - ci->reg_power = ch->max_reg_power; - ci->antenna_max = ch->max_antenna_gain; - ci->antenna_max = 0; - - /* mode & flags share storage */ - ci->mode = ch->mode; - ci->flags |= __cpu_to_le32(flags); + ath10k_wmi_put_wmi_channel(ar, ci, ch); } - return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID); + return skb; } -int ath10k_wmi_peer_assoc(struct ath10k *ar, - const struct wmi_peer_assoc_complete_arg *arg) +static void +ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) { - struct wmi_peer_assoc_complete_cmd *cmd; - struct sk_buff *skb; - - if (arg->peer_mpdu_density > 16) - return -EINVAL; - if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) - return -EINVAL; - if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) - return -EINVAL; - - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); - if (!skb) - return -ENOMEM; + struct wmi_common_peer_assoc_complete_cmd *cmd = buf; - cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(arg->vdev_id); cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1); cmd->peer_associd = __cpu_to_le32(arg->peer_aid); @@ -1990,7 +7661,7 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar, cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps); cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode); - memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, arg->addr); cmd->peer_legacy_rates.num_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates); @@ -2010,31 +7681,239 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar, __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); cmd->peer_vht_rates.tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); +} + +static void +ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_main_peer_assoc_complete_cmd *cmd = buf; + + ath10k_wmi_peer_assoc_fill(ar, buf, arg); + memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info)); +} + +static void +ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + ath10k_wmi_peer_assoc_fill(ar, buf, arg); +} + +static void +ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf; + int max_mcs, max_nss; + u32 info0; + + /* TODO: Is using max values okay with firmware? 
*/ + max_mcs = 0xf; + max_nss = 0xf; + + info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) | + SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS); + + ath10k_wmi_peer_assoc_fill(ar, buf, arg); + cmd->info0 = __cpu_to_le32(info0); +} + +static void +ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf; + + ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg); + cmd->peer_bw_rxnss_override = + __cpu_to_le32(arg->peer_bw_rxnss_override); +} + +static int +ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg) +{ + if (arg->peer_mpdu_density > 16) + return -EINVAL; + if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) + return -EINVAL; + if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) + return -EINVAL; - return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID); + return 0; } -int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg) +static struct sk_buff * +ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) { - struct wmi_bcn_tx_cmd *cmd; + size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd); struct sk_buff *skb; + int ret; + + ret = ath10k_wmi_peer_assoc_check_arg(arg); + if (ret) + return ERR_PTR(ret); - skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len); + skb = ath10k_wmi_alloc_skb(ar, len); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); + + ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi peer assoc vdev %d addr %pM (%s)\n", + arg->vdev_id, arg->addr, + arg->peer_reassoc ? "reassociate" : "new"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) +{ + size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd); + struct sk_buff *skb; + int ret; + + ret = ath10k_wmi_peer_assoc_check_arg(arg); + if (ret) + return ERR_PTR(ret); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi peer assoc vdev %d addr %pM (%s)\n", + arg->vdev_id, arg->addr, + arg->peer_reassoc ? "reassociate" : "new"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) +{ + size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd); + struct sk_buff *skb; + int ret; + + ret = ath10k_wmi_peer_assoc_check_arg(arg); + if (ret) + return ERR_PTR(ret); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi peer assoc vdev %d addr %pM (%s)\n", + arg->vdev_id, arg->addr, + arg->peer_reassoc ? "reassociate" : "new"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) +{ + size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd); + struct sk_buff *skb; + int ret; + + ret = ath10k_wmi_peer_assoc_check_arg(arg); + if (ret) + return ERR_PTR(ret); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi peer assoc vdev %d addr %pM (%s)\n", + arg->vdev_id, arg->addr, + arg->peer_reassoc ? 
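[Reviewer note] The four peer-assoc generators differ only in trailing fields, which is what lets them all layer on ath10k_wmi_peer_assoc_fill(): each ABI's command is assumed to begin with the common command layout, so the same buffer can be filled by the common helper and then extended. Illustrative struct nesting (example_* names are not the actual wmi.h definitions):

struct example_common_peer_assoc_cmd {
	__le32 vdev_id;
	/* ...remaining common peer-assoc fields... */
} __packed;

struct example_10_2_peer_assoc_cmd {
	struct example_common_peer_assoc_cmd common;	/* must come first */
	__le32 info0;	/* SM()-packed max_mcs/max_nss */
} __packed;

struct example_10_4_peer_assoc_cmd {
	struct example_10_2_peer_assoc_cmd base;
	__le32 peer_bw_rxnss_override;
} __packed;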
"reassociate" : "new"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar) +{ + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, 0); + if (!skb) + return ERR_PTR(-ENOMEM); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar, + enum wmi_bss_survey_req_type type) +{ + struct wmi_pdev_chan_info_req_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data; + cmd->type = __cpu_to_le32(type); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev bss info request type %d\n", type); + + return skb; +} + +/* This function assumes the beacon is already DMA mapped */ +static struct sk_buff * +ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn, + size_t bcn_len, u32 bcn_paddr, bool dtim_zero, + bool deliver_cab) +{ + struct wmi_bcn_tx_ref_cmd *cmd; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + u16 fc; - cmd = (struct wmi_bcn_tx_cmd *)skb->data; - cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id); - cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate); - cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power); - cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len); - memcpy(cmd->bcn, arg->bcn, arg->bcn_len); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + hdr = (struct ieee80211_hdr *)bcn; + fc = le16_to_cpu(hdr->frame_control); + + cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->data_len = __cpu_to_le32(bcn_len); + cmd->data_ptr = __cpu_to_le32(bcn_paddr); + cmd->msdu_id = 0; + cmd->frame_control = __cpu_to_le32(fc); + cmd->flags = 0; + cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA); - return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID); + if (dtim_zero) + cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); + + if (deliver_cab) + cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); + + return skb; } -static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, - const struct wmi_wmm_params_arg *arg) +void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params, + const struct wmi_wmm_params_arg *arg) { params->cwmin = __cpu_to_le32(arg->cwmin); params->cwmax = __cpu_to_le32(arg->cwmax); @@ -2044,38 +7923,1719 @@ static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, params->no_ack = __cpu_to_le32(arg->no_ack); } -int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, - const struct wmi_pdev_set_wmm_params_arg *arg) +static struct sk_buff * +ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar, + const struct wmi_wmm_params_all_arg *arg) { struct wmi_pdev_set_wmm_params *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_pdev_set_wmm_params *)skb->data; - ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be); - ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk); - ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); - ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); + ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be); + ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk); + ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); + ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); - 
ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); + return skb; } -int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) +static struct sk_buff * +ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) { struct wmi_request_stats_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cmd = (struct wmi_request_stats_cmd *)skb->data; - cmd->stats_id = __cpu_to_le32(stats_id); + cmd->stats_id = __cpu_to_le32(stats_mask); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n", + stats_mask); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar, + enum wmi_force_fw_hang_type type, u32 delay_ms) +{ + struct wmi_force_fw_hang_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_force_fw_hang_cmd *)skb->data; + cmd->type = __cpu_to_le32(type); + cmd->delay_ms = __cpu_to_le32(delay_ms); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", + type, delay_ms); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable, + u32 log_level) +{ + struct wmi_dbglog_cfg_cmd *cmd; + struct sk_buff *skb; + u32 cfg; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_dbglog_cfg_cmd *)skb->data; + + if (module_enable) { + cfg = SM(log_level, + ATH10K_DBGLOG_CFG_LOG_LVL); + } else { + /* set back defaults, all modules with WARN level */ + cfg = SM(ATH10K_DBGLOG_LEVEL_WARN, + ATH10K_DBGLOG_CFG_LOG_LVL); + module_enable = ~0; + } + + cmd->module_enable = __cpu_to_le32(module_enable); + cmd->module_valid = __cpu_to_le32(~0); + cmd->config_enable = __cpu_to_le32(cfg); + cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi dbglog cfg modules %08x %08x config %08x %08x\n", + __le32_to_cpu(cmd->module_enable), + __le32_to_cpu(cmd->module_valid), + __le32_to_cpu(cmd->config_enable), + __le32_to_cpu(cmd->config_valid)); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable, + u32 log_level) +{ + struct wmi_10_4_dbglog_cfg_cmd *cmd; + struct sk_buff *skb; + u32 cfg; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data; + + if (module_enable) { + cfg = SM(log_level, + ATH10K_DBGLOG_CFG_LOG_LVL); + } else { + /* set back defaults, all modules with WARN level */ + cfg = SM(ATH10K_DBGLOG_LEVEL_WARN, + ATH10K_DBGLOG_CFG_LOG_LVL); + module_enable = ~0; + } + + cmd->module_enable = __cpu_to_le64(module_enable); + cmd->module_valid = __cpu_to_le64(~0); + cmd->config_enable = __cpu_to_le32(cfg); + cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n", + __le64_to_cpu(cmd->module_enable), + __le64_to_cpu(cmd->module_valid), + __le32_to_cpu(cmd->config_enable), + __le32_to_cpu(cmd->config_valid)); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap) +{ + struct wmi_pdev_pktlog_enable_cmd *cmd; + struct 
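[Reviewer note] The dbglog generators above pack the log level with SM(). For readers outside the driver, SM()/MS() are the usual shift-and-mask helpers built from each field's _LSB/_MASK definition pair; simplified equivalents (EXAMPLE_* names are illustrative):

#define EXAMPLE_SM(value, field)	(((value) << field##_LSB) & field##_MASK)
#define EXAMPLE_MS(word, field)		(((word) & field##_MASK) >> field##_LSB)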
sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + ev_bitmap &= ATH10K_PKTLOG_ANY; + + cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data; + cmd->ev_bitmap = __cpu_to_le32(ev_bitmap); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n", + ev_bitmap); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar) +{ + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, 0); + if (!skb) + return ERR_PTR(-ENOMEM); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n"); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period, + u32 duration, u32 next_offset, + u32 enabled) +{ + struct wmi_pdev_set_quiet_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data; + cmd->period = __cpu_to_le32(period); + cmd->duration = __cpu_to_le32(duration); + cmd->next_start = __cpu_to_le32(next_offset); + cmd->enabled = __cpu_to_le32(enabled); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi quiet param: period %u duration %u enabled %d\n", + period, duration, enabled); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id, + const u8 *mac) +{ + struct wmi_addba_clear_resp_cmd *cmd; + struct sk_buff *skb; + + if (!mac) + return ERR_PTR(-EINVAL); + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_addba_clear_resp_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, mac); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n", + vdev_id, mac); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 buf_size) +{ + struct wmi_addba_send_cmd *cmd; + struct sk_buff *skb; + + if (!mac) + return ERR_PTR(-EINVAL); + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_addba_send_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, mac); + cmd->tid = __cpu_to_le32(tid); + cmd->buffersize = __cpu_to_le32(buf_size); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n", + vdev_id, mac, tid, buf_size); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 status) +{ + struct wmi_addba_setresponse_cmd *cmd; + struct sk_buff *skb; + + if (!mac) + return ERR_PTR(-EINVAL); + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_addba_setresponse_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, mac); + cmd->tid = __cpu_to_le32(tid); + cmd->statuscode = __cpu_to_le32(status); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n", + vdev_id, mac, tid, status); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 initiator, u32 reason) +{ + struct wmi_delba_send_cmd *cmd; + struct sk_buff *skb; + + if (!mac) + return ERR_PTR(-EINVAL); + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return 
ERR_PTR(-ENOMEM); + + cmd = (struct wmi_delba_send_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, mac); + cmd->tid = __cpu_to_le32(tid); + cmd->initiator = __cpu_to_le32(initiator); + cmd->reasoncode = __cpu_to_le32(reason); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n", + vdev_id, mac, tid, initiator, reason); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param) +{ + struct wmi_pdev_get_tpc_config_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data; + cmd->param = __cpu_to_le32(param); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev get tpc config param %d\n", param); + return skb; +} + +static void +ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s\n", + "ath10k PDEV stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Channel noise floor", pdev->ch_noise_floor); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Channel TX power", pdev->chan_tx_power); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "TX frame count", pdev->tx_frame_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RX frame count", pdev->rx_frame_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RX clear count", pdev->rx_clear_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Cycle count", pdev->cycle_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "PHY error count", pdev->phy_err_count); + + *length = len; +} + +static void +ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RTS bad count", pdev->rts_bad); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RTS good count", pdev->rts_good); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "FCS bad count", pdev->fcs_bad); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "No beacon count", pdev->no_beacons); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "MIB int count", pdev->mib_int_count); + + len += scnprintf(buf + len, buf_len - len, "\n"); + *length = len; +} + +static void +ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "\n%30s\n", + "ath10k PDEV TX stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HTT cookies queued", pdev->comp_queued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HTT cookies disp.", pdev->comp_delivered); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDU queued", pdev->msdu_enqued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDU queued", pdev->mpdu_enqued); + 
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDUs dropped", pdev->wmm_drop); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Local enqued", pdev->local_enqued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Local freed", pdev->local_freed); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HW queued", pdev->hw_queued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PPDUs reaped", pdev->hw_reaped); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Num underruns", pdev->underrun); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PPDUs cleaned", pdev->tx_abort); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs requeued", pdev->mpdus_requeued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Excessive retries", pdev->tx_ko); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HW rate", pdev->data_rc); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Sched self triggers", pdev->self_triggers); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Dropped due to SW retries", + pdev->sw_retry_failure); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Illegal rate phy errors", + pdev->illgl_rate_phy_err); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Pdev continuous xretry", pdev->pdev_cont_xretry); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "TX timeout", pdev->pdev_tx_timeout); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PDEV resets", pdev->pdev_resets); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PHY underrun", pdev->phy_underrun); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDU is more than txop limit", pdev->txop_ovf); + *length = len; +} + +static void +ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "\n%30s\n", + "ath10k PDEV RX stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Mid PPDU route change", + pdev->mid_ppdu_route_change); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Tot. 
number of statuses", pdev->status_rcvd); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 0", pdev->r0_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 1", pdev->r1_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 2", pdev->r2_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 3", pdev->r3_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDUs delivered to HTT", pdev->htt_msdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs delivered to HTT", pdev->htt_mpdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDUs delivered to stack", pdev->loc_msdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs delivered to stack", pdev->loc_mpdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Oversized AMSDUs", pdev->oversize_amsdu); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PHY errors", pdev->phy_errs); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PHY errors drops", pdev->phy_err_drop); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); + *length = len; +} + +static void +ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + int i; + + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "vdev id", vdev->vdev_id); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "beacon snr", vdev->beacon_snr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "data snr", vdev->data_snr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx frames", vdev->num_rx_frames); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rts fail", vdev->num_rts_fail); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rts success", vdev->num_rts_success); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx err", vdev->num_rx_err); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx discard", vdev->num_rx_discard); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num tx not acked", vdev->num_tx_not_acked); + + for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames", i, + vdev->num_tx_frames[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames retries", i, + vdev->num_tx_frames_retries[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames failures", i, + vdev->num_tx_frames_failures[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] 0x%08x\n", + "tx rate history", i, + vdev->tx_rate_history[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "beacon rssi history", i, + vdev->beacon_rssi_history[i]); + + len += scnprintf(buf + len, buf_len - len, "\n"); + *length = len; +} + +static void +ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer, + char *buf, u32 *length, bool extended_peer) +{ + u32 len = 
*length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", + "Peer MAC address", peer->peer_macaddr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "Peer RSSI", peer->peer_rssi); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "Peer TX rate", peer->peer_tx_rate); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "Peer RX rate", peer->peer_rx_rate); + if (!extended_peer) + len += scnprintf(buf + len, buf_len - len, "%30s %llu\n", + "Peer RX duration", peer->rx_duration); + + len += scnprintf(buf + len, buf_len - len, "\n"); + *length = len; +} + +static void +ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", + "Peer MAC address", peer->peer_macaddr); + len += scnprintf(buf + len, buf_len - len, "%30s %llu\n", + "Peer RX duration", peer->rx_duration); +} + +void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf) +{ + u32 len = 0; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + const struct ath10k_fw_stats_pdev *pdev; + const struct ath10k_fw_stats_vdev *vdev; + const struct ath10k_fw_stats_peer *peer; + size_t num_peers; + size_t num_vdevs; + + spin_lock_bh(&ar->data_lock); + + pdev = list_first_entry_or_null(&fw_stats->pdevs, + struct ath10k_fw_stats_pdev, list); + if (!pdev) { + ath10k_warn(ar, "failed to get pdev stats\n"); + goto unlock; + } + + num_peers = list_count_nodes(&fw_stats->peers); + num_vdevs = list_count_nodes(&fw_stats->vdevs); + + ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k VDEV stats", num_vdevs); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(vdev, &fw_stats->vdevs, list) { + ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k PEER stats", num_peers); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(peer, &fw_stats->peers, list) { + ath10k_wmi_fw_peer_stats_fill(peer, buf, &len, + fw_stats->extended); + } + +unlock: + spin_unlock_bh(&ar->data_lock); + + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; +} + +void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf) +{ + unsigned int len = 0; + unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE; + const struct ath10k_fw_stats_pdev *pdev; + const struct ath10k_fw_stats_vdev *vdev; + const struct ath10k_fw_stats_peer *peer; + size_t num_peers; + size_t num_vdevs; + + spin_lock_bh(&ar->data_lock); + + pdev = list_first_entry_or_null(&fw_stats->pdevs, + struct ath10k_fw_stats_pdev, list); + if (!pdev) { + ath10k_warn(ar, "failed to get pdev stats\n"); + goto unlock; + } + + num_peers = list_count_nodes(&fw_stats->peers); + num_vdevs = list_count_nodes(&fw_stats->vdevs); + + ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); + 
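[Reviewer note] The "len >= buf_len" epilogue these fill routines share (see the end of the main variant above) leans on scnprintf() semantics: it returns the number of characters actually written, never more than size - 1, so the accumulated len stays below buf_len and the buffer can always be NUL-terminated. Sketch of the invariant, with example_terminate() as an illustrative name:

static void example_terminate(char *buf, u32 len, u32 buf_len)
{
	if (len >= buf_len)
		buf[len - 1] = 0;	/* defensive; len caps at buf_len - 1 */
	else
		buf[len] = 0;
}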
+ len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k VDEV stats", num_vdevs); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(vdev, &fw_stats->vdevs, list) { + ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k PEER stats", num_peers); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(peer, &fw_stats->peers, list) { + ath10k_wmi_fw_peer_stats_fill(peer, buf, &len, + fw_stats->extended); + } + +unlock: + spin_unlock_bh(&ar->data_lock); + + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; +} + +static struct sk_buff * +ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable, + u32 detect_level, u32 detect_margin) +{ + struct wmi_pdev_set_adaptive_cca_params *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data; + cmd->enable = __cpu_to_le32(enable); + cmd->cca_detect_level = __cpu_to_le32(detect_level); + cmd->cca_detect_margin = __cpu_to_le32(detect_margin); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n", + enable, detect_level, detect_margin); + return skb; +} + +static void +ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + u32 val; + + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "vdev id", vdev->vdev_id); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "ppdu aggr count", vdev->ppdu_aggr_cnt); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "ppdu noack", vdev->ppdu_noack); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "mpdu queued", vdev->mpdu_queued); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "mpdu sw requeued", vdev->mpdu_sw_requeued); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "mpdu success retry", vdev->mpdu_suc_retry); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "mpdu success multitry", vdev->mpdu_suc_multitry); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "mpdu fail retry", vdev->mpdu_fail_retry); + val = vdev->tx_ftm_suc; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "tx ftm success", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->tx_ftm_suc_retry; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "tx ftm success retry", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->tx_ftm_fail; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "tx ftm fail", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->rx_ftmr_cnt; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "rx ftm request count", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->rx_ftmr_dup_cnt; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "rx ftm request dup count", + 
MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->rx_iftmr_cnt; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "rx initial ftm req count", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->rx_iftmr_dup_cnt; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "rx initial ftm req dup cnt", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + *length = len; +} + +void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf) +{ + u32 len = 0; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + const struct ath10k_fw_stats_pdev *pdev; + const struct ath10k_fw_stats_vdev_extd *vdev; + const struct ath10k_fw_stats_peer *peer; + const struct ath10k_fw_extd_stats_peer *extd_peer; + size_t num_peers; + size_t num_vdevs; + + spin_lock_bh(&ar->data_lock); + + pdev = list_first_entry_or_null(&fw_stats->pdevs, + struct ath10k_fw_stats_pdev, list); + if (!pdev) { + ath10k_warn(ar, "failed to get pdev stats\n"); + goto unlock; + } + + num_peers = list_count_nodes(&fw_stats->peers); + num_vdevs = list_count_nodes(&fw_stats->vdevs); + + ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HW paused", pdev->hw_paused); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Seqs posted", pdev->seq_posted); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Seqs failed queueing", pdev->seq_failed_queueing); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Seqs completed", pdev->seq_completed); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Seqs restarted", pdev->seq_restarted); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MU Seqs posted", pdev->mu_seq_posted); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs SW flushed", pdev->mpdus_sw_flush); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs HW filtered", pdev->mpdus_hw_filter); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs truncated", pdev->mpdus_truncated); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs receive no ACK", pdev->mpdus_ack_failed); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs expired", pdev->mpdus_expired); + + ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Num Rx Overflow errors", pdev->rx_ovfl_errs); + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k VDEV stats", num_vdevs); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + list_for_each_entry(vdev, &fw_stats->vdevs, list) { + ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k PEER stats", num_peers); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(peer, &fw_stats->peers, list) { + ath10k_wmi_fw_peer_stats_fill(peer, buf, &len, + fw_stats->extended); + } + + if (fw_stats->extended) { + list_for_each_entry(extd_peer, &fw_stats->peers_extd, list) { + ath10k_wmi_fw_extd_peer_stats_fill(extd_peer, buf, + &len); + 
} + } + +unlock: + spin_unlock_bh(&ar->data_lock); + + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; +} + +int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype) +{ + switch (subtype) { + case WMI_VDEV_SUBTYPE_NONE: + return WMI_VDEV_SUBTYPE_LEGACY_NONE; + case WMI_VDEV_SUBTYPE_P2P_DEVICE: + return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV; + case WMI_VDEV_SUBTYPE_P2P_CLIENT: + return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI; + case WMI_VDEV_SUBTYPE_P2P_GO: + return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO; + case WMI_VDEV_SUBTYPE_PROXY_STA: + return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA; + case WMI_VDEV_SUBTYPE_MESH_11S: + case WMI_VDEV_SUBTYPE_MESH_NON_11S: + return -EOPNOTSUPP; + } + return -EOPNOTSUPP; +} + +static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype) +{ + switch (subtype) { + case WMI_VDEV_SUBTYPE_NONE: + return WMI_VDEV_SUBTYPE_10_2_4_NONE; + case WMI_VDEV_SUBTYPE_P2P_DEVICE: + return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV; + case WMI_VDEV_SUBTYPE_P2P_CLIENT: + return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI; + case WMI_VDEV_SUBTYPE_P2P_GO: + return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO; + case WMI_VDEV_SUBTYPE_PROXY_STA: + return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA; + case WMI_VDEV_SUBTYPE_MESH_11S: + return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S; + case WMI_VDEV_SUBTYPE_MESH_NON_11S: + return -EOPNOTSUPP; + } + return -EOPNOTSUPP; +} + +static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype) +{ + switch (subtype) { + case WMI_VDEV_SUBTYPE_NONE: + return WMI_VDEV_SUBTYPE_10_4_NONE; + case WMI_VDEV_SUBTYPE_P2P_DEVICE: + return WMI_VDEV_SUBTYPE_10_4_P2P_DEV; + case WMI_VDEV_SUBTYPE_P2P_CLIENT: + return WMI_VDEV_SUBTYPE_10_4_P2P_CLI; + case WMI_VDEV_SUBTYPE_P2P_GO: + return WMI_VDEV_SUBTYPE_10_4_P2P_GO; + case WMI_VDEV_SUBTYPE_PROXY_STA: + return WMI_VDEV_SUBTYPE_10_4_PROXY_STA; + case WMI_VDEV_SUBTYPE_MESH_11S: + return WMI_VDEV_SUBTYPE_10_4_MESH_11S; + case WMI_VDEV_SUBTYPE_MESH_NON_11S: + return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S; + } + return -EOPNOTSUPP; +} + +static struct sk_buff * +ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar, + enum wmi_host_platform_type type, + u32 fw_feature_bitmap) +{ + struct wmi_ext_resource_config_10_4_cmd *cmd; + struct sk_buff *skb; + u32 num_tdls_sleep_sta = 0; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map)) + num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA; + + cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data; + cmd->host_platform_config = __cpu_to_le32(type); + cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap); + cmd->wlan_gpio_priority = __cpu_to_le32(ar->coex_gpio_pin); + cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT); + cmd->coex_gpio_pin1 = __cpu_to_le32(-1); + cmd->coex_gpio_pin2 = __cpu_to_le32(-1); + cmd->coex_gpio_pin3 = __cpu_to_le32(-1); + cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS); + cmd->num_tdls_conn_table_entries = __cpu_to_le32(20); + cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta); + cmd->max_tdls_concurrent_buffer_sta = + __cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi ext resource config host type %d firmware feature bitmap %08x\n", + type, fw_feature_bitmap); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id, + enum wmi_tdls_state state) +{ + 
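[Reviewer note] The subtype tables above exist because the legacy, 10.2.4 and 10.4 ABIs number vdev subtypes differently; callers translate through the op and treat a negative return as "unsupported on this firmware". A sketch assuming the ath10k_wmi_get_vdev_subtype() and ath10k_wmi_vdev_create() wrappers (example_create_mesh_vdev() is hypothetical):

static int example_create_mesh_vdev(struct ath10k *ar, u32 vdev_id,
				    const u8 *mac)
{
	int subtype;

	subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_MESH_11S);
	if (subtype < 0)	/* e.g. -EOPNOTSUPP on legacy firmware */
		return subtype;

	return ath10k_wmi_vdev_create(ar, vdev_id, WMI_VDEV_TYPE_AP,
				      subtype, mac);
}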
struct wmi_10_4_tdls_set_state_cmd *cmd; + struct sk_buff *skb; + u32 options = 0; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) && + state == WMI_TDLS_ENABLE_ACTIVE) + state = WMI_TDLS_ENABLE_PASSIVE; + + if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map)) + options |= WMI_TDLS_BUFFER_STA_EN; + + cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->state = __cpu_to_le32(state); + cmd->notification_interval_ms = __cpu_to_le32(5000); + cmd->tx_discovery_threshold = __cpu_to_le32(100); + cmd->tx_teardown_threshold = __cpu_to_le32(5); + cmd->rssi_teardown_threshold = __cpu_to_le32(-75); + cmd->rssi_delta = __cpu_to_le32(-20); + cmd->tdls_options = __cpu_to_le32(options); + cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2); + cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000); + cmd->tdls_puapsd_mask = __cpu_to_le32(0xf); + cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0); + cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10); + cmd->teardown_notification_ms = __cpu_to_le32(10); + cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n", + state, vdev_id); + return skb; +} + +static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp) +{ + u32 peer_qos = 0; + + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + peer_qos |= WMI_TDLS_PEER_QOS_AC_VO; + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) + peer_qos |= WMI_TDLS_PEER_QOS_AC_VI; + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + peer_qos |= WMI_TDLS_PEER_QOS_AC_BK; + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) + peer_qos |= WMI_TDLS_PEER_QOS_AC_BE; + + peer_qos |= SM(sp, WMI_TDLS_PEER_SP); + + return peer_qos; +} + +static struct sk_buff * +ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param) +{ + struct wmi_pdev_get_tpc_table_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data; + cmd->param = __cpu_to_le32(param); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev get tpc table param:%d\n", param); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar, + const struct wmi_tdls_peer_update_cmd_arg *arg, + const struct wmi_tdls_peer_capab_arg *cap, + const struct wmi_channel_arg *chan_arg) +{ + struct wmi_10_4_tdls_peer_update_cmd *cmd; + struct wmi_tdls_peer_capabilities *peer_cap; + struct wmi_channel *chan; + struct sk_buff *skb; + u32 peer_qos; + int len, chan_len; + int i; + + /* tdls peer update cmd has place holder for one channel*/ + chan_len = cap->peer_chan_len ? 
(cap->peer_chan_len - 1) : 0; + + len = sizeof(*cmd) + chan_len * sizeof(*chan); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, arg->addr); + cmd->peer_state = __cpu_to_le32(arg->peer_state); + + peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues, + cap->peer_max_sp); + + peer_cap = &cmd->peer_capab; + peer_cap->peer_qos = __cpu_to_le32(peer_qos); + peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support); + peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support); + peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass); + peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass); + peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len); + peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len); + + for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++) + peer_cap->peer_operclass[i] = cap->peer_operclass[i]; + + peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder); + peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num); + peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw); + + for (i = 0; i < cap->peer_chan_len; i++) { + chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i]; + ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]); + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tdls peer update vdev %i state %d n_chans %u\n", + arg->vdev_id, arg->peer_state, cap->peer_chan_len); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar, + const struct ath10k_radar_found_info *arg) +{ + struct wmi_radar_found_info *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_radar_found_info *)skb->data; + cmd->pri_min = __cpu_to_le32(arg->pri_min); + cmd->pri_max = __cpu_to_le32(arg->pri_max); + cmd->width_min = __cpu_to_le32(arg->width_min); + cmd->width_max = __cpu_to_le32(arg->width_max); + cmd->sidx_min = __cpu_to_le32(arg->sidx_min); + cmd->sidx_max = __cpu_to_le32(arg->sidx_max); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n", + arg->pri_min, arg->pri_max, arg->width_min, + arg->width_max, arg->sidx_min, arg->sidx_max); + return skb; +} + +static struct sk_buff * +ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar, + const struct wmi_per_peer_per_tid_cfg_arg *arg) +{ + struct wmi_peer_per_tid_cfg_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + memset(skb->data, 0, sizeof(*cmd)); + + cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data; + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr); + cmd->tid = cpu_to_le32(arg->tid); + cmd->ack_policy = cpu_to_le32(arg->ack_policy); + cmd->aggr_control = cpu_to_le32(arg->aggr_control); + cmd->rate_control = cpu_to_le32(arg->rate_ctrl); + cmd->retry_count = cpu_to_le32(arg->retry_count); + cmd->rcode_flags = cpu_to_le32(arg->rcode_flags); + cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap); + cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr 
%pM\n", + arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control, + arg->rate_ctrl, arg->rcode_flags, arg->retry_count, + arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr); + return skb; +} + +static struct sk_buff * +ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value) +{ + struct wmi_echo_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_echo_cmd *)skb->data; + cmd->value = cpu_to_le32(value); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi echo value 0x%08x\n", value); + return skb; +} + +int +ath10k_wmi_barrier(struct ath10k *ar) +{ + int ret; + int time_left; + + spin_lock_bh(&ar->data_lock); + reinit_completion(&ar->wmi.barrier); + spin_unlock_bh(&ar->data_lock); + + ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID); + if (ret) { + ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->wmi.barrier, + ATH10K_WMI_BARRIER_TIMEOUT_HZ); + if (!time_left) + return -ETIMEDOUT; + + return 0; +} + +static struct sk_buff * +ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar, + const struct wmi_bb_timing_cfg_arg *arg) +{ + struct wmi_pdev_bb_timing_cfg_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data; + cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing); + cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n", + arg->bb_tx_timing, arg->bb_xpa_timing); + return skb; +} + +static const struct wmi_ops wmi_ops = { + .rx = ath10k_wmi_op_rx, + .map_svc = wmi_main_svc_map, + + .pull_scan = ath10k_wmi_op_pull_scan_ev, + .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev, + .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, + .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, + .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, + .pull_swba = ath10k_wmi_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, + .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, + .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, + .pull_rdy = ath10k_wmi_op_pull_rdy_ev, + .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats, + .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, + + .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, + .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, + .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd, + .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, + .gen_init = ath10k_wmi_op_gen_init, + .gen_start_scan = ath10k_wmi_op_gen_start_scan, + .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, + .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, + .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, + .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, + .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, + .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, + .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, + .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, + .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, + .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, + .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, + /* .gen_vdev_wmm_conf not implemented */ + .gen_peer_create = ath10k_wmi_op_gen_peer_create, + .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, + .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, + .gen_peer_set_param = 
ath10k_wmi_op_gen_peer_set_param, + .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc, + .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, + .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, + .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, + .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, + .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, + .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, + .gen_request_stats = ath10k_wmi_op_gen_request_stats, + .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, + .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, + .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, + .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, + .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, + .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, + /* .gen_pdev_get_temperature not implemented */ + .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, + .gen_addba_send = ath10k_wmi_op_gen_addba_send, + .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, + .gen_delba_send = ath10k_wmi_op_gen_delba_send, + .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, + .gen_echo = ath10k_wmi_op_gen_echo, + .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, + .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, + + /* .gen_bcn_tmpl not implemented */ + /* .gen_prb_tmpl not implemented */ + /* .gen_p2p_go_bcn_ie not implemented */ + /* .gen_adaptive_qcs not implemented */ + /* .gen_pdev_enable_adaptive_cca not implemented */ +}; + +static const struct wmi_ops wmi_10_1_ops = { + .rx = ath10k_wmi_10_1_op_rx, + .map_svc = wmi_10x_svc_map, + .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, + .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats, + .gen_init = ath10k_wmi_10_1_op_gen_init, + .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, + .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, + .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc, + /* .gen_pdev_get_temperature not implemented */ + + /* shared with main branch */ + .pull_scan = ath10k_wmi_op_pull_scan_ev, + .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev, + .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, + .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, + .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, + .pull_swba = ath10k_wmi_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, + .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, + .pull_rdy = ath10k_wmi_op_pull_rdy_ev, + .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, + + .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, + .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, + .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, + .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, + .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, + .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, + .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, + .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, + .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, + .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, + .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, + .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, + .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, + .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, + /* .gen_vdev_wmm_conf not implemented */ + .gen_peer_create = ath10k_wmi_op_gen_peer_create, + .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, + .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, + .gen_peer_set_param = 
ath10k_wmi_op_gen_peer_set_param, + .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, + .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, + .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, + .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, + .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, + .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, + .gen_request_stats = ath10k_wmi_op_gen_request_stats, + .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, + .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, + .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, + .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, + .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, + .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, + .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, + .gen_addba_send = ath10k_wmi_op_gen_addba_send, + .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, + .gen_delba_send = ath10k_wmi_op_gen_delba_send, + .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, + .gen_echo = ath10k_wmi_op_gen_echo, + .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, + .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, + /* .gen_bcn_tmpl not implemented */ + /* .gen_prb_tmpl not implemented */ + /* .gen_p2p_go_bcn_ie not implemented */ + /* .gen_adaptive_qcs not implemented */ + /* .gen_pdev_enable_adaptive_cca not implemented */ +}; + +static const struct wmi_ops wmi_10_2_ops = { + .rx = ath10k_wmi_10_2_op_rx, + .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats, + .gen_init = ath10k_wmi_10_2_op_gen_init, + .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, + /* .gen_pdev_get_temperature not implemented */ + + /* shared with 10.1 */ + .map_svc = wmi_10x_svc_map, + .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, + .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, + .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, + .gen_echo = ath10k_wmi_op_gen_echo, + + .pull_scan = ath10k_wmi_op_pull_scan_ev, + .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev, + .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, + .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, + .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, + .pull_swba = ath10k_wmi_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, + .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, + .pull_rdy = ath10k_wmi_op_pull_rdy_ev, + .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, + + .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, + .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, + .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, + .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, + .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, + .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, + .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, + .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, + .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, + .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, + .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, + .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, + .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, + .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, + /* .gen_vdev_wmm_conf not implemented */ + .gen_peer_create = ath10k_wmi_op_gen_peer_create, + .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, + .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, + .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr, + .gen_peer_set_param = 
ath10k_wmi_op_gen_peer_set_param, + .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, + .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, + .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, + .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, + .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, + .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, + .gen_request_stats = ath10k_wmi_op_gen_request_stats, + .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, + .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, + .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, + .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, + .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, + .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, + .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, + .gen_addba_send = ath10k_wmi_op_gen_addba_send, + .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, + .gen_delba_send = ath10k_wmi_op_gen_delba_send, + .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, + .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, + .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, + /* .gen_pdev_enable_adaptive_cca not implemented */ +}; + +static const struct wmi_ops wmi_10_2_4_ops = { + .rx = ath10k_wmi_10_2_op_rx, + .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats, + .gen_init = ath10k_wmi_10_2_op_gen_init, + .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, + .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, + .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info, + + /* shared with 10.1 */ + .map_svc = wmi_10x_svc_map, + .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, + .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, + .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, + .gen_echo = ath10k_wmi_op_gen_echo, + + .pull_scan = ath10k_wmi_op_pull_scan_ev, + .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev, + .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, + .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, + .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, + .pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, + .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, + .pull_rdy = ath10k_wmi_op_pull_rdy_ev, + .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, + + .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, + .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, + .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, + .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, + .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, + .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, + .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, + .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, + .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, + .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, + .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, + .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, + .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, + .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, + .gen_peer_create = ath10k_wmi_op_gen_peer_create, + .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, + .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, + .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, + .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, + .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, + .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, + .gen_scan_chan_list = 
ath10k_wmi_op_gen_scan_chan_list, + .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, + .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, + .gen_request_stats = ath10k_wmi_op_gen_request_stats, + .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, + .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, + .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, + .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, + .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, + .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, + .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, + .gen_addba_send = ath10k_wmi_op_gen_addba_send, + .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, + .gen_delba_send = ath10k_wmi_op_gen_delba_send, + .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config, + .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, + .gen_pdev_enable_adaptive_cca = + ath10k_wmi_op_gen_pdev_enable_adaptive_cca, + .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype, + .gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing, + .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, + .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, + /* .gen_bcn_tmpl not implemented */ + /* .gen_prb_tmpl not implemented */ + /* .gen_p2p_go_bcn_ie not implemented */ + /* .gen_adaptive_qcs not implemented */ +}; + +static const struct wmi_ops wmi_10_4_ops = { + .rx = ath10k_wmi_10_4_op_rx, + .map_svc = wmi_10_4_svc_map, + + .pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats, + .pull_scan = ath10k_wmi_op_pull_scan_ev, + .pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev, + .pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev, + .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, + .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, + .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr, + .pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev, + .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, + .pull_rdy = ath10k_wmi_op_pull_rdy_ev, + .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev, + .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme, + + .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, + .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, + .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr, + .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, + .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, + .gen_init = ath10k_wmi_10_4_op_gen_init, + .gen_start_scan = ath10k_wmi_op_gen_start_scan, + .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, + .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, + .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, + .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, + .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, + .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, + .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, + .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, + .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, + .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, + .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, + .gen_peer_create = ath10k_wmi_op_gen_peer_create, + .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, + .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, + .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, + .gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc, + .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, + .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, + .gen_set_ap_ps 
= ath10k_wmi_op_gen_set_ap_ps, + .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, + .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, + .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, + .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, + .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, + .gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg, + .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, + .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, + .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, + .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, + .gen_addba_send = ath10k_wmi_op_gen_addba_send, + .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, + .gen_delba_send = ath10k_wmi_op_gen_delba_send, + .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill, + .ext_resource_config = ath10k_wmi_10_4_ext_resource_config, + .gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state, + .gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update, + .gen_pdev_get_tpc_table_cmdid = + ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid, + .gen_radar_found = ath10k_wmi_10_4_gen_radar_found, + .gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg, + + /* shared with 10.2 */ + .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, + .gen_request_stats = ath10k_wmi_op_gen_request_stats, + .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, + .get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype, + .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info, + .gen_echo = ath10k_wmi_op_gen_echo, + .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config, + .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, + .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, +}; + +int ath10k_wmi_attach(struct ath10k *ar) +{ + switch (ar->running_fw->fw_file.wmi_op_version) { + case ATH10K_FW_WMI_OP_VERSION_10_4: + ar->wmi.ops = &wmi_10_4_ops; + ar->wmi.cmd = &wmi_10_4_cmd_map; + ar->wmi.vdev_param = &wmi_10_4_vdev_param_map; + ar->wmi.pdev_param = &wmi_10_4_pdev_param_map; + ar->wmi.peer_param = &wmi_peer_param_map; + ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; + ar->wmi_key_cipher = wmi_key_cipher_suites; + break; + case ATH10K_FW_WMI_OP_VERSION_10_2_4: + ar->wmi.cmd = &wmi_10_2_4_cmd_map; + ar->wmi.ops = &wmi_10_2_4_ops; + ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map; + ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map; + ar->wmi.peer_param = &wmi_peer_param_map; + ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; + ar->wmi_key_cipher = wmi_key_cipher_suites; + break; + case ATH10K_FW_WMI_OP_VERSION_10_2: + ar->wmi.cmd = &wmi_10_2_cmd_map; + ar->wmi.ops = &wmi_10_2_ops; + ar->wmi.vdev_param = &wmi_10x_vdev_param_map; + ar->wmi.pdev_param = &wmi_10x_pdev_param_map; + ar->wmi.peer_param = &wmi_peer_param_map; + ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; + ar->wmi_key_cipher = wmi_key_cipher_suites; + break; + case ATH10K_FW_WMI_OP_VERSION_10_1: + ar->wmi.cmd = &wmi_10x_cmd_map; + ar->wmi.ops = &wmi_10_1_ops; + ar->wmi.vdev_param = &wmi_10x_vdev_param_map; + ar->wmi.pdev_param = &wmi_10x_pdev_param_map; + ar->wmi.peer_param = &wmi_peer_param_map; + ar->wmi.peer_flags = &wmi_10x_peer_flags_map; + ar->wmi_key_cipher = wmi_key_cipher_suites; + break; + case ATH10K_FW_WMI_OP_VERSION_MAIN: + ar->wmi.cmd = &wmi_cmd_map; + ar->wmi.ops = &wmi_ops; + ar->wmi.vdev_param = &wmi_vdev_param_map; + ar->wmi.pdev_param = &wmi_pdev_param_map; + ar->wmi.peer_param = &wmi_peer_param_map; + ar->wmi.peer_flags = &wmi_peer_flags_map; + 
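/* Illustrative aside, not part of the patch: the ops and cmd tables
 * installed here are consumed by thin dispatch wrappers (kept in
 * wmi-ops.h, not shown in this hunk). A minimal sketch of that
 * pattern, using the echo op as the example:
 *
 *     static inline int ath10k_wmi_echo(struct ath10k *ar, u32 value)
 *     {
 *             struct sk_buff *skb;
 *
 *             if (!ar->wmi.ops->gen_echo)
 *                     return -EOPNOTSUPP;
 *
 *             skb = ar->wmi.ops->gen_echo(ar, value);
 *             if (IS_ERR(skb))
 *                     return PTR_ERR(skb);
 *
 *             return ath10k_wmi_cmd_send(ar, skb,
 *                                        ar->wmi.cmd->echo_cmdid);
 *     }
 *
 * so callers never branch on the firmware WMI ABI selected here.
 */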
ar->wmi_key_cipher = wmi_key_cipher_suites; + break; + case ATH10K_FW_WMI_OP_VERSION_TLV: + ath10k_wmi_tlv_attach(ar); + ar->wmi_key_cipher = wmi_tlv_key_cipher_suites; + break; + case ATH10K_FW_WMI_OP_VERSION_UNSET: + case ATH10K_FW_WMI_OP_VERSION_MAX: + ath10k_err(ar, "unsupported WMI op version: %d\n", + ar->running_fw->fw_file.wmi_op_version); + return -EINVAL; + } + + init_completion(&ar->wmi.service_ready); + init_completion(&ar->wmi.unified_ready); + init_completion(&ar->wmi.barrier); + init_completion(&ar->wmi.radar_confirm); + + INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work); + INIT_WORK(&ar->radar_confirmation_work, + ath10k_radar_confirmation_work); + + if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, + ar->running_fw->fw_file.fw_features)) { + idr_init(&ar->wmi.mgmt_pending_tx); + } + + return 0; +} + +void ath10k_wmi_free_host_mem(struct ath10k *ar) +{ + int i; + + /* free the host memory chunks requested by firmware */ + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + dma_free_coherent(ar->dev, + ar->wmi.mem_chunks[i].len, + ar->wmi.mem_chunks[i].vaddr, + ar->wmi.mem_chunks[i].paddr); + } + + ar->wmi.num_mem_chunks = 0; +} + +static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr, + void *ctx) +{ + struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr; + struct ath10k *ar = ctx; + struct sk_buff *msdu; + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "force cleanup mgmt msdu_id %u\n", msdu_id); + + msdu = pkt_addr->vaddr; + dma_unmap_single(ar->dev, pkt_addr->paddr, + msdu->len, DMA_TO_DEVICE); + ieee80211_free_txskb(ar->hw, msdu); + kfree(pkt_addr); + + return 0; +} + +void ath10k_wmi_detach(struct ath10k *ar) +{ + if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, + ar->running_fw->fw_file.fw_features)) { + spin_lock_bh(&ar->data_lock); + idr_for_each(&ar->wmi.mgmt_pending_tx, + ath10k_wmi_mgmt_tx_clean_up_pending, ar); + idr_destroy(&ar->wmi.mgmt_pending_tx); + spin_unlock_bh(&ar->data_lock); + } - ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); - return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID); + cancel_work_sync(&ar->svc_rdy_work); + dev_kfree_skb(ar->svc_rdy_skb); } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 9555f5a0e041..7f50a1de6b97 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -1,25 +1,16 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #ifndef _WMI_H_ #define _WMI_H_ #include <linux/types.h> -#include <net/mac80211.h> +#include <linux/ieee80211.h> /* * This file specifies the WMI interface for the Unified Software @@ -55,7 +46,7 @@ * type. * * 6. Comment each parameter part of the WMI command/event structure by - * using the 2 stars at the begining of C comment instead of one star to + * using the 2 stars at the beginning of C comment instead of one star to * enable HTML document generation using Doxygen. * */ @@ -73,119 +64,788 @@ struct wmi_cmd_hdr { #define HTC_PROTOCOL_VERSION 0x0002 #define WMI_PROTOCOL_VERSION 0x0002 -enum wmi_service_id { - WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */ - WMI_SERVICE_SCAN_OFFLOAD, /* scan offload */ - WMI_SERVICE_ROAM_OFFLOAD, /* roam offload */ - WMI_SERVICE_BCN_MISS_OFFLOAD, /* beacon miss offload */ - WMI_SERVICE_STA_PWRSAVE, /* fake sleep + basic power save */ - WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */ - WMI_SERVICE_AP_UAPSD, /* uapsd on AP */ - WMI_SERVICE_AP_DFS, /* DFS on AP */ - WMI_SERVICE_11AC, /* supports 11ac */ - WMI_SERVICE_BLOCKACK, /* Supports triggering ADDBA/DELBA from host*/ - WMI_SERVICE_PHYERR, /* PHY error */ - WMI_SERVICE_BCN_FILTER, /* Beacon filter support */ - WMI_SERVICE_RTT, /* RTT (round trip time) support */ - WMI_SERVICE_RATECTRL, /* Rate-control */ - WMI_SERVICE_WOW, /* WOW Support */ - WMI_SERVICE_RATECTRL_CACHE, /* Rate-control caching */ - WMI_SERVICE_IRAM_TIDS, /* TIDs in IRAM */ - WMI_SERVICE_ARPNS_OFFLOAD, /* ARP NS Offload support */ - WMI_SERVICE_NLO, /* Network list offload service */ - WMI_SERVICE_GTK_OFFLOAD, /* GTK offload */ - WMI_SERVICE_SCAN_SCH, /* Scan Scheduler Service */ - WMI_SERVICE_CSA_OFFLOAD, /* CSA offload service */ - WMI_SERVICE_CHATTER, /* Chatter service */ - WMI_SERVICE_COEX_FREQAVOID, /* FW report freq range to avoid */ - WMI_SERVICE_PACKET_POWER_SAVE, /* packet power save service */ - WMI_SERVICE_FORCE_FW_HANG, /* To test fw recovery mechanism */ - WMI_SERVICE_GPIO, /* GPIO service */ - WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */ - WMI_STA_UAPSD_BASIC_AUTO_TRIG, /* UAPSD AC Trigger Generation */ - WMI_STA_UAPSD_VAR_AUTO_TRIG, /* -do- */ - WMI_SERVICE_STA_KEEP_ALIVE, /* STA keep alive mechanism support */ - WMI_SERVICE_TX_ENCAP, /* Packet type for TX encapsulation */ - - WMI_SERVICE_LAST, - WMI_MAX_SERVICE = 64 /* max service */ -}; - -static inline char *wmi_service_name(int service_id) +/* + * There is no signed version of __le32, so for a temporary solution come + * up with our own version. The idea is from fs/ntfs/endian.h. + * + * Use a_ prefix so that it doesn't conflict if we get proper support to + * linux/types.h. 
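 *
 * (Illustrative aside, not part of the patch: a_cpu_to_sle32(-20)
 * stores the two's-complement pattern 0xffffffec in little-endian
 * byte order, and a_sle32_to_cpu() on that value recovers -20, so
 * negative firmware fields such as RSSI thresholds round-trip
 * intact through these helpers.)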
+ */ +typedef __s32 __bitwise a_sle32; + +static inline a_sle32 a_cpu_to_sle32(s32 val) +{ + return (__force a_sle32)cpu_to_le32(val); +} + +static inline s32 a_sle32_to_cpu(a_sle32 val) +{ + return le32_to_cpu((__force __le32)val); +} + +enum wmi_service { + WMI_SERVICE_BEACON_OFFLOAD = 0, + WMI_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_DFS, + WMI_SERVICE_11AC, + WMI_SERVICE_BLOCKACK, + WMI_SERVICE_PHYERR, + WMI_SERVICE_BCN_FILTER, + WMI_SERVICE_RTT, + WMI_SERVICE_RATECTRL, + WMI_SERVICE_WOW, + WMI_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_IRAM_TIDS, + WMI_SERVICE_ARPNS_OFFLOAD, + WMI_SERVICE_NLO, + WMI_SERVICE_GTK_OFFLOAD, + WMI_SERVICE_SCAN_SCH, + WMI_SERVICE_CSA_OFFLOAD, + WMI_SERVICE_CHATTER, + WMI_SERVICE_COEX_FREQAVOID, + WMI_SERVICE_PACKET_POWER_SAVE, + WMI_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_GPIO, + WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_SERVICE_STA_KEEP_ALIVE, + WMI_SERVICE_TX_ENCAP, + WMI_SERVICE_BURST, + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_SERVICE_ROAM_SCAN_OFFLOAD, + WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, + WMI_SERVICE_EARLY_RX, + WMI_SERVICE_STA_SMPS, + WMI_SERVICE_FWTEST, + WMI_SERVICE_STA_WMMAC, + WMI_SERVICE_TDLS, + WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, + WMI_SERVICE_ADAPTIVE_OCS, + WMI_SERVICE_BA_SSN_SUPPORT, + WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE, + WMI_SERVICE_WLAN_HB, + WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, + WMI_SERVICE_BATCH_SCAN, + WMI_SERVICE_QPOWER, + WMI_SERVICE_PLMREQ, + WMI_SERVICE_THERMAL_MGMT, + WMI_SERVICE_RMC, + WMI_SERVICE_MHF_OFFLOAD, + WMI_SERVICE_COEX_SAR, + WMI_SERVICE_BCN_TXRATE_OVERRIDE, + WMI_SERVICE_NAN, + WMI_SERVICE_L1SS_STAT, + WMI_SERVICE_ESTIMATE_LINKSPEED, + WMI_SERVICE_OBSS_SCAN, + WMI_SERVICE_TDLS_OFFCHAN, + WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, + WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, + WMI_SERVICE_IBSS_PWRSAVE, + WMI_SERVICE_LPASS, + WMI_SERVICE_EXTSCAN, + WMI_SERVICE_D0WOW, + WMI_SERVICE_HSOFFLOAD, + WMI_SERVICE_ROAM_HO_OFFLOAD, + WMI_SERVICE_RX_FULL_REORDER, + WMI_SERVICE_DHCP_OFFLOAD, + WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, + WMI_SERVICE_MDNS_OFFLOAD, + WMI_SERVICE_SAP_AUTH_OFFLOAD, + WMI_SERVICE_ATF, + WMI_SERVICE_COEX_GPIO, + WMI_SERVICE_ENHANCED_PROXY_STA, + WMI_SERVICE_TT, + WMI_SERVICE_PEER_CACHING, + WMI_SERVICE_AUX_SPECTRAL_INTF, + WMI_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_SERVICE_BSS_CHANNEL_INFO_64, + WMI_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_SERVICE_MESH_11S, + WMI_SERVICE_MESH_NON_11S, + WMI_SERVICE_PEER_STATS, + WMI_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, + WMI_SERVICE_TX_MODE_PUSH_ONLY, + WMI_SERVICE_TX_MODE_PUSH_PULL, + WMI_SERVICE_TX_MODE_DYNAMIC, + WMI_SERVICE_VDEV_RX_FILTER, + WMI_SERVICE_BTCOEX, + WMI_SERVICE_CHECK_CAL_VERSION, + WMI_SERVICE_DBGLOG_WARN2, + WMI_SERVICE_BTCOEX_DUTY_CYCLE, + WMI_SERVICE_4_WIRE_COEX_SUPPORT, + WMI_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_SERVICE_PROG_GPIO_BAND_SELECT, + WMI_SERVICE_SMART_LOGGING_SUPPORT, + WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, + WMI_SERVICE_MGMT_TX_WMI, + WMI_SERVICE_TDLS_WIDER_BANDWIDTH, + WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, + WMI_SERVICE_TPC_STATS_FINAL, + WMI_SERVICE_RESET_CHIP, + WMI_SERVICE_SPOOF_MAC_SUPPORT, + WMI_SERVICE_TX_DATA_ACK_RSSI, + WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, + 
WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, + WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, + WMI_SERVICE_THERM_THROT, + WMI_SERVICE_RTT_RESPONDER_ROLE, + WMI_SERVICE_PER_PACKET_SW_ENCRYPT, + WMI_SERVICE_REPORT_AIRTIME, + WMI_SERVICE_SYNC_DELETE_CMDS, + WMI_SERVICE_TX_PWR_PER_PEER, + WMI_SERVICE_SUPPORT_EXTEND_ADDRESS, + WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, + WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, + + /* Remember to add the new value to wmi_service_name()! */ + + /* keep last */ + WMI_SERVICE_MAX, +}; + +enum wmi_10x_service { + WMI_10X_SERVICE_BEACON_OFFLOAD = 0, + WMI_10X_SERVICE_SCAN_OFFLOAD, + WMI_10X_SERVICE_ROAM_OFFLOAD, + WMI_10X_SERVICE_BCN_MISS_OFFLOAD, + WMI_10X_SERVICE_STA_PWRSAVE, + WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_10X_SERVICE_AP_UAPSD, + WMI_10X_SERVICE_AP_DFS, + WMI_10X_SERVICE_11AC, + WMI_10X_SERVICE_BLOCKACK, + WMI_10X_SERVICE_PHYERR, + WMI_10X_SERVICE_BCN_FILTER, + WMI_10X_SERVICE_RTT, + WMI_10X_SERVICE_RATECTRL, + WMI_10X_SERVICE_WOW, + WMI_10X_SERVICE_RATECTRL_CACHE, + WMI_10X_SERVICE_IRAM_TIDS, + WMI_10X_SERVICE_BURST, + + /* introduced in 10.2 */ + WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_10X_SERVICE_FORCE_FW_HANG, + WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_10X_SERVICE_ATF, + WMI_10X_SERVICE_COEX_GPIO, + WMI_10X_SERVICE_AUX_SPECTRAL_INTF, + WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_10X_SERVICE_BSS_CHANNEL_INFO_64, + WMI_10X_SERVICE_MESH, + WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_10X_SERVICE_PEER_STATS, + WMI_10X_SERVICE_RESET_CHIP, + WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_10X_SERVICE_VDEV_BCN_RATE_CONTROL, + WMI_10X_SERVICE_PER_PACKET_SW_ENCRYPT, + WMI_10X_SERVICE_BB_TIMING_CONFIG_SUPPORT, +}; + +enum wmi_main_service { + WMI_MAIN_SERVICE_BEACON_OFFLOAD = 0, + WMI_MAIN_SERVICE_SCAN_OFFLOAD, + WMI_MAIN_SERVICE_ROAM_OFFLOAD, + WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD, + WMI_MAIN_SERVICE_STA_PWRSAVE, + WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_MAIN_SERVICE_AP_UAPSD, + WMI_MAIN_SERVICE_AP_DFS, + WMI_MAIN_SERVICE_11AC, + WMI_MAIN_SERVICE_BLOCKACK, + WMI_MAIN_SERVICE_PHYERR, + WMI_MAIN_SERVICE_BCN_FILTER, + WMI_MAIN_SERVICE_RTT, + WMI_MAIN_SERVICE_RATECTRL, + WMI_MAIN_SERVICE_WOW, + WMI_MAIN_SERVICE_RATECTRL_CACHE, + WMI_MAIN_SERVICE_IRAM_TIDS, + WMI_MAIN_SERVICE_ARPNS_OFFLOAD, + WMI_MAIN_SERVICE_NLO, + WMI_MAIN_SERVICE_GTK_OFFLOAD, + WMI_MAIN_SERVICE_SCAN_SCH, + WMI_MAIN_SERVICE_CSA_OFFLOAD, + WMI_MAIN_SERVICE_CHATTER, + WMI_MAIN_SERVICE_COEX_FREQAVOID, + WMI_MAIN_SERVICE_PACKET_POWER_SAVE, + WMI_MAIN_SERVICE_FORCE_FW_HANG, + WMI_MAIN_SERVICE_GPIO, + WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM, + WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_MAIN_SERVICE_STA_KEEP_ALIVE, + WMI_MAIN_SERVICE_TX_ENCAP, +}; + +enum wmi_10_4_service { + WMI_10_4_SERVICE_BEACON_OFFLOAD = 0, + WMI_10_4_SERVICE_SCAN_OFFLOAD, + WMI_10_4_SERVICE_ROAM_OFFLOAD, + WMI_10_4_SERVICE_BCN_MISS_OFFLOAD, + WMI_10_4_SERVICE_STA_PWRSAVE, + WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_10_4_SERVICE_AP_UAPSD, + WMI_10_4_SERVICE_AP_DFS, + WMI_10_4_SERVICE_11AC, + WMI_10_4_SERVICE_BLOCKACK, + WMI_10_4_SERVICE_PHYERR, + WMI_10_4_SERVICE_BCN_FILTER, + WMI_10_4_SERVICE_RTT, + WMI_10_4_SERVICE_RATECTRL, + WMI_10_4_SERVICE_WOW, + WMI_10_4_SERVICE_RATECTRL_CACHE, + WMI_10_4_SERVICE_IRAM_TIDS, + WMI_10_4_SERVICE_BURST, + WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_10_4_SERVICE_GTK_OFFLOAD, + WMI_10_4_SERVICE_SCAN_SCH, + WMI_10_4_SERVICE_CSA_OFFLOAD, + WMI_10_4_SERVICE_CHATTER, + WMI_10_4_SERVICE_COEX_FREQAVOID, + 
WMI_10_4_SERVICE_PACKET_POWER_SAVE, + WMI_10_4_SERVICE_FORCE_FW_HANG, + WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_10_4_SERVICE_GPIO, + WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_10_4_SERVICE_STA_KEEP_ALIVE, + WMI_10_4_SERVICE_TX_ENCAP, + WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, + WMI_10_4_SERVICE_EARLY_RX, + WMI_10_4_SERVICE_ENHANCED_PROXY_STA, + WMI_10_4_SERVICE_TT, + WMI_10_4_SERVICE_ATF, + WMI_10_4_SERVICE_PEER_CACHING, + WMI_10_4_SERVICE_COEX_GPIO, + WMI_10_4_SERVICE_AUX_SPECTRAL_INTF, + WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, + WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_10_4_SERVICE_MESH_NON_11S, + WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_10_4_SERVICE_PEER_STATS, + WMI_10_4_SERVICE_MESH_11S, + WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, + WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY, + WMI_10_4_SERVICE_TX_MODE_PUSH_PULL, + WMI_10_4_SERVICE_TX_MODE_DYNAMIC, + WMI_10_4_SERVICE_VDEV_RX_FILTER, + WMI_10_4_SERVICE_BTCOEX, + WMI_10_4_SERVICE_CHECK_CAL_VERSION, + WMI_10_4_SERVICE_DBGLOG_WARN2, + WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE, + WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT, + WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT, + WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT, + WMI_10_4_SERVICE_TDLS, + WMI_10_4_SERVICE_TDLS_OFFCHAN, + WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA, + WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA, + WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY, + WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH, + WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT, + WMI_10_4_SERVICE_TPC_STATS_FINAL, + WMI_10_4_SERVICE_CFR_CAPTURE_SUPPORT, + WMI_10_4_SERVICE_TX_DATA_ACK_RSSI, + WMI_10_4_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_LEGACY, + WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT, + WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT, + WMI_10_4_SERVICE_VDEV_BCN_RATE_CONTROL, + WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, + WMI_10_4_SERVICE_HTT_ASSERT_TRIGGER_SUPPORT, + WMI_10_4_SERVICE_VDEV_FILTER_NEIGHBOR_RX_PACKETS, + WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, + WMI_10_4_SERVICE_PEER_CHWIDTH_CHANGE, + WMI_10_4_SERVICE_RX_FILTER_OUT_COUNT, + WMI_10_4_SERVICE_RTT_RESPONDER_ROLE, + WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, + WMI_10_4_SERVICE_REPORT_AIRTIME, + WMI_10_4_SERVICE_TX_PWR_PER_PEER, + WMI_10_4_SERVICE_FETCH_PEER_TX_PN, + WMI_10_4_SERVICE_MULTIPLE_VDEV_RESTART, + WMI_10_4_SERVICE_ENHANCED_RADIO_COUNTERS, + WMI_10_4_SERVICE_QINQ_SUPPORT, + WMI_10_4_SERVICE_RESET_CHIP, +}; + +static inline char *wmi_service_name(enum wmi_service service_id) { +#define SVCSTR(x) case x: return #x + switch (service_id) { - case WMI_SERVICE_BEACON_OFFLOAD: - return "BEACON_OFFLOAD"; - case WMI_SERVICE_SCAN_OFFLOAD: - return "SCAN_OFFLOAD"; - case WMI_SERVICE_ROAM_OFFLOAD: - return "ROAM_OFFLOAD"; - case WMI_SERVICE_BCN_MISS_OFFLOAD: - return "BCN_MISS_OFFLOAD"; - case WMI_SERVICE_STA_PWRSAVE: - return "STA_PWRSAVE"; - case WMI_SERVICE_STA_ADVANCED_PWRSAVE: - return "STA_ADVANCED_PWRSAVE"; - case WMI_SERVICE_AP_UAPSD: - return "AP_UAPSD"; - case WMI_SERVICE_AP_DFS: - return "AP_DFS"; - case WMI_SERVICE_11AC: - return "11AC"; - case WMI_SERVICE_BLOCKACK: - return "BLOCKACK"; - case WMI_SERVICE_PHYERR: - return "PHYERR"; - case WMI_SERVICE_BCN_FILTER: - return "BCN_FILTER"; - case WMI_SERVICE_RTT: - return "RTT"; - case WMI_SERVICE_RATECTRL: - return "RATECTRL"; - case WMI_SERVICE_WOW: - return "WOW"; - case 
WMI_SERVICE_RATECTRL_CACHE: - return "RATECTRL CACHE"; - case WMI_SERVICE_IRAM_TIDS: - return "IRAM TIDS"; - case WMI_SERVICE_ARPNS_OFFLOAD: - return "ARPNS_OFFLOAD"; - case WMI_SERVICE_NLO: - return "NLO"; - case WMI_SERVICE_GTK_OFFLOAD: - return "GTK_OFFLOAD"; - case WMI_SERVICE_SCAN_SCH: - return "SCAN_SCH"; - case WMI_SERVICE_CSA_OFFLOAD: - return "CSA_OFFLOAD"; - case WMI_SERVICE_CHATTER: - return "CHATTER"; - case WMI_SERVICE_COEX_FREQAVOID: - return "COEX_FREQAVOID"; - case WMI_SERVICE_PACKET_POWER_SAVE: - return "PACKET_POWER_SAVE"; - case WMI_SERVICE_FORCE_FW_HANG: - return "FORCE FW HANG"; - case WMI_SERVICE_GPIO: - return "GPIO"; - case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM: - return "MODULATED DTIM"; - case WMI_STA_UAPSD_BASIC_AUTO_TRIG: - return "BASIC UAPSD"; - case WMI_STA_UAPSD_VAR_AUTO_TRIG: - return "VAR UAPSD"; - case WMI_SERVICE_STA_KEEP_ALIVE: - return "STA KEEP ALIVE"; - case WMI_SERVICE_TX_ENCAP: - return "TX ENCAP"; - default: - return "UNKNOWN SERVICE\n"; + SVCSTR(WMI_SERVICE_BEACON_OFFLOAD); + SVCSTR(WMI_SERVICE_SCAN_OFFLOAD); + SVCSTR(WMI_SERVICE_ROAM_OFFLOAD); + SVCSTR(WMI_SERVICE_BCN_MISS_OFFLOAD); + SVCSTR(WMI_SERVICE_STA_PWRSAVE); + SVCSTR(WMI_SERVICE_STA_ADVANCED_PWRSAVE); + SVCSTR(WMI_SERVICE_AP_UAPSD); + SVCSTR(WMI_SERVICE_AP_DFS); + SVCSTR(WMI_SERVICE_11AC); + SVCSTR(WMI_SERVICE_BLOCKACK); + SVCSTR(WMI_SERVICE_PHYERR); + SVCSTR(WMI_SERVICE_BCN_FILTER); + SVCSTR(WMI_SERVICE_RTT); + SVCSTR(WMI_SERVICE_RATECTRL); + SVCSTR(WMI_SERVICE_WOW); + SVCSTR(WMI_SERVICE_RATECTRL_CACHE); + SVCSTR(WMI_SERVICE_IRAM_TIDS); + SVCSTR(WMI_SERVICE_ARPNS_OFFLOAD); + SVCSTR(WMI_SERVICE_NLO); + SVCSTR(WMI_SERVICE_GTK_OFFLOAD); + SVCSTR(WMI_SERVICE_SCAN_SCH); + SVCSTR(WMI_SERVICE_CSA_OFFLOAD); + SVCSTR(WMI_SERVICE_CHATTER); + SVCSTR(WMI_SERVICE_COEX_FREQAVOID); + SVCSTR(WMI_SERVICE_PACKET_POWER_SAVE); + SVCSTR(WMI_SERVICE_FORCE_FW_HANG); + SVCSTR(WMI_SERVICE_GPIO); + SVCSTR(WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM); + SVCSTR(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG); + SVCSTR(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG); + SVCSTR(WMI_SERVICE_STA_KEEP_ALIVE); + SVCSTR(WMI_SERVICE_TX_ENCAP); + SVCSTR(WMI_SERVICE_BURST); + SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT); + SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT); + SVCSTR(WMI_SERVICE_ROAM_SCAN_OFFLOAD); + SVCSTR(WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC); + SVCSTR(WMI_SERVICE_EARLY_RX); + SVCSTR(WMI_SERVICE_STA_SMPS); + SVCSTR(WMI_SERVICE_FWTEST); + SVCSTR(WMI_SERVICE_STA_WMMAC); + SVCSTR(WMI_SERVICE_TDLS); + SVCSTR(WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE); + SVCSTR(WMI_SERVICE_ADAPTIVE_OCS); + SVCSTR(WMI_SERVICE_BA_SSN_SUPPORT); + SVCSTR(WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE); + SVCSTR(WMI_SERVICE_WLAN_HB); + SVCSTR(WMI_SERVICE_LTE_ANT_SHARE_SUPPORT); + SVCSTR(WMI_SERVICE_BATCH_SCAN); + SVCSTR(WMI_SERVICE_QPOWER); + SVCSTR(WMI_SERVICE_PLMREQ); + SVCSTR(WMI_SERVICE_THERMAL_MGMT); + SVCSTR(WMI_SERVICE_RMC); + SVCSTR(WMI_SERVICE_MHF_OFFLOAD); + SVCSTR(WMI_SERVICE_COEX_SAR); + SVCSTR(WMI_SERVICE_BCN_TXRATE_OVERRIDE); + SVCSTR(WMI_SERVICE_NAN); + SVCSTR(WMI_SERVICE_L1SS_STAT); + SVCSTR(WMI_SERVICE_ESTIMATE_LINKSPEED); + SVCSTR(WMI_SERVICE_OBSS_SCAN); + SVCSTR(WMI_SERVICE_TDLS_OFFCHAN); + SVCSTR(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA); + SVCSTR(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA); + SVCSTR(WMI_SERVICE_IBSS_PWRSAVE); + SVCSTR(WMI_SERVICE_LPASS); + SVCSTR(WMI_SERVICE_EXTSCAN); + SVCSTR(WMI_SERVICE_D0WOW); + SVCSTR(WMI_SERVICE_HSOFFLOAD); + SVCSTR(WMI_SERVICE_ROAM_HO_OFFLOAD); + SVCSTR(WMI_SERVICE_RX_FULL_REORDER); + SVCSTR(WMI_SERVICE_DHCP_OFFLOAD); + 
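/* Illustrative aside, not part of the patch: SVCSTR relies on the
 * preprocessor's stringification operator, so e.g.
 *
 *     SVCSTR(WMI_SERVICE_ATF);
 *
 * expands to
 *
 *     case WMI_SERVICE_ATF: return "WMI_SERVICE_ATF";
 *
 * keeping the printed name in lock-step with the enum.
 */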
SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT); + SVCSTR(WMI_SERVICE_MDNS_OFFLOAD); + SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD); + SVCSTR(WMI_SERVICE_ATF); + SVCSTR(WMI_SERVICE_COEX_GPIO); + SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA); + SVCSTR(WMI_SERVICE_TT); + SVCSTR(WMI_SERVICE_PEER_CACHING); + SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF); + SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF); + SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64); + SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT); + SVCSTR(WMI_SERVICE_MESH_11S); + SVCSTR(WMI_SERVICE_MESH_NON_11S); + SVCSTR(WMI_SERVICE_PEER_STATS); + SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT); + SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT); + SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY); + SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL); + SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC); + SVCSTR(WMI_SERVICE_VDEV_RX_FILTER); + SVCSTR(WMI_SERVICE_BTCOEX); + SVCSTR(WMI_SERVICE_CHECK_CAL_VERSION); + SVCSTR(WMI_SERVICE_DBGLOG_WARN2); + SVCSTR(WMI_SERVICE_BTCOEX_DUTY_CYCLE); + SVCSTR(WMI_SERVICE_4_WIRE_COEX_SUPPORT); + SVCSTR(WMI_SERVICE_EXTENDED_NSS_SUPPORT); + SVCSTR(WMI_SERVICE_PROG_GPIO_BAND_SELECT); + SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT); + SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE); + SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY); + SVCSTR(WMI_SERVICE_MGMT_TX_WMI); + SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH); + SVCSTR(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS); + SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT); + SVCSTR(WMI_SERVICE_TPC_STATS_FINAL); + SVCSTR(WMI_SERVICE_RESET_CHIP); + SVCSTR(WMI_SERVICE_SPOOF_MAC_SUPPORT); + SVCSTR(WMI_SERVICE_TX_DATA_ACK_RSSI); + SVCSTR(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT); + SVCSTR(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT); + SVCSTR(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT); + SVCSTR(WMI_SERVICE_THERM_THROT); + SVCSTR(WMI_SERVICE_RTT_RESPONDER_ROLE); + SVCSTR(WMI_SERVICE_PER_PACKET_SW_ENCRYPT); + SVCSTR(WMI_SERVICE_REPORT_AIRTIME); + SVCSTR(WMI_SERVICE_SYNC_DELETE_CMDS); + SVCSTR(WMI_SERVICE_TX_PWR_PER_PEER); + SVCSTR(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS); + SVCSTR(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT); + SVCSTR(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT); + + case WMI_SERVICE_MAX: + return NULL; } + +#undef SVCSTR + + return NULL; } +#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ + ((svc_id) < (len) && \ + __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \ + BIT((svc_id) % (sizeof(u32)))) + +/* This extension is required to accommodate new services, current limit + * for wmi_services is 64 as target is using only 4-bits of each 32-bit + * wmi_service word. Extending this to make use of remaining unused bits + * for new services. 
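 *
 * (Illustrative aside, not part of the patch: with len == 64, a
 * legacy service id occupies bit (svc_id % 4) of word (svc_id / 4),
 * i.e. only bits 0-3 of each 32-bit word carry legacy services. An
 * extended id reuses the spare bits: id 70, for instance, maps to
 * extension word (70 - 64) / 28 == 0, bit ((70 - 64) % 28) + 4 == 10,
 * per the macro below.)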
+ */ +#define WMI_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ + ((svc_id) >= (len) && \ + __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 28]) & \ + BIT(((((svc_id) - (len)) % 28) & 0x1f) + 4)) + +#define SVCMAP(x, y, len) \ + do { \ + if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \ + (WMI_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \ + __set_bit(y, out); \ + } while (0) -#define WMI_SERVICE_BM_SIZE \ - ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32)) +static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, + size_t len) +{ + SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD, + WMI_SERVICE_BEACON_OFFLOAD, len); + SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_SCAN_OFFLOAD, len); + SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD, len); + SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD, len); + SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_PWRSAVE, len); + SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE, len); + SVCMAP(WMI_10X_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_UAPSD, len); + SVCMAP(WMI_10X_SERVICE_AP_DFS, + WMI_SERVICE_AP_DFS, len); + SVCMAP(WMI_10X_SERVICE_11AC, + WMI_SERVICE_11AC, len); + SVCMAP(WMI_10X_SERVICE_BLOCKACK, + WMI_SERVICE_BLOCKACK, len); + SVCMAP(WMI_10X_SERVICE_PHYERR, + WMI_SERVICE_PHYERR, len); + SVCMAP(WMI_10X_SERVICE_BCN_FILTER, + WMI_SERVICE_BCN_FILTER, len); + SVCMAP(WMI_10X_SERVICE_RTT, + WMI_SERVICE_RTT, len); + SVCMAP(WMI_10X_SERVICE_RATECTRL, + WMI_SERVICE_RATECTRL, len); + SVCMAP(WMI_10X_SERVICE_WOW, + WMI_SERVICE_WOW, len); + SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_RATECTRL_CACHE, len); + SVCMAP(WMI_10X_SERVICE_IRAM_TIDS, + WMI_SERVICE_IRAM_TIDS, len); + SVCMAP(WMI_10X_SERVICE_BURST, + WMI_SERVICE_BURST, len); + SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len); + SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_FORCE_FW_HANG, len); + SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len); + SVCMAP(WMI_10X_SERVICE_ATF, + WMI_SERVICE_ATF, len); + SVCMAP(WMI_10X_SERVICE_COEX_GPIO, + WMI_SERVICE_COEX_GPIO, len); + SVCMAP(WMI_10X_SERVICE_AUX_SPECTRAL_INTF, + WMI_SERVICE_AUX_SPECTRAL_INTF, len); + SVCMAP(WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_SERVICE_AUX_CHAN_LOAD_INTF, len); + SVCMAP(WMI_10X_SERVICE_BSS_CHANNEL_INFO_64, + WMI_SERVICE_BSS_CHANNEL_INFO_64, len); + SVCMAP(WMI_10X_SERVICE_MESH, + WMI_SERVICE_MESH_11S, len); + SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); + SVCMAP(WMI_10X_SERVICE_PEER_STATS, + WMI_SERVICE_PEER_STATS, len); + SVCMAP(WMI_10X_SERVICE_RESET_CHIP, + WMI_SERVICE_RESET_CHIP, len); + SVCMAP(WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len); + SVCMAP(WMI_10X_SERVICE_BB_TIMING_CONFIG_SUPPORT, + WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, len); + SVCMAP(WMI_10X_SERVICE_PER_PACKET_SW_ENCRYPT, + WMI_SERVICE_PER_PACKET_SW_ENCRYPT, len); +} + +static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, + size_t len) +{ + SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD, + WMI_SERVICE_BEACON_OFFLOAD, len); + SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_SCAN_OFFLOAD, len); + SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD, len); + SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD, len); + SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_PWRSAVE, len); + SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE, + 
WMI_SERVICE_STA_ADVANCED_PWRSAVE, len); + SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_UAPSD, len); + SVCMAP(WMI_MAIN_SERVICE_AP_DFS, + WMI_SERVICE_AP_DFS, len); + SVCMAP(WMI_MAIN_SERVICE_11AC, + WMI_SERVICE_11AC, len); + SVCMAP(WMI_MAIN_SERVICE_BLOCKACK, + WMI_SERVICE_BLOCKACK, len); + SVCMAP(WMI_MAIN_SERVICE_PHYERR, + WMI_SERVICE_PHYERR, len); + SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER, + WMI_SERVICE_BCN_FILTER, len); + SVCMAP(WMI_MAIN_SERVICE_RTT, + WMI_SERVICE_RTT, len); + SVCMAP(WMI_MAIN_SERVICE_RATECTRL, + WMI_SERVICE_RATECTRL, len); + SVCMAP(WMI_MAIN_SERVICE_WOW, + WMI_SERVICE_WOW, len); + SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_RATECTRL_CACHE, len); + SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS, + WMI_SERVICE_IRAM_TIDS, len); + SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD, + WMI_SERVICE_ARPNS_OFFLOAD, len); + SVCMAP(WMI_MAIN_SERVICE_NLO, + WMI_SERVICE_NLO, len); + SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD, + WMI_SERVICE_GTK_OFFLOAD, len); + SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH, + WMI_SERVICE_SCAN_SCH, len); + SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD, + WMI_SERVICE_CSA_OFFLOAD, len); + SVCMAP(WMI_MAIN_SERVICE_CHATTER, + WMI_SERVICE_CHATTER, len); + SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID, + WMI_SERVICE_COEX_FREQAVOID, len); + SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE, + WMI_SERVICE_PACKET_POWER_SAVE, len); + SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_FORCE_FW_HANG, len); + SVCMAP(WMI_MAIN_SERVICE_GPIO, + WMI_SERVICE_GPIO, len); + SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM, + WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len); + SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len); + SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len); + SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE, + WMI_SERVICE_STA_KEEP_ALIVE, len); + SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP, + WMI_SERVICE_TX_ENCAP, len); +} + +static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, + size_t len) +{ + SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD, + WMI_SERVICE_BEACON_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_SCAN_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_PWRSAVE, len); + SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE, len); + SVCMAP(WMI_10_4_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_UAPSD, len); + SVCMAP(WMI_10_4_SERVICE_AP_DFS, + WMI_SERVICE_AP_DFS, len); + SVCMAP(WMI_10_4_SERVICE_11AC, + WMI_SERVICE_11AC, len); + SVCMAP(WMI_10_4_SERVICE_BLOCKACK, + WMI_SERVICE_BLOCKACK, len); + SVCMAP(WMI_10_4_SERVICE_PHYERR, + WMI_SERVICE_PHYERR, len); + SVCMAP(WMI_10_4_SERVICE_BCN_FILTER, + WMI_SERVICE_BCN_FILTER, len); + SVCMAP(WMI_10_4_SERVICE_RTT, + WMI_SERVICE_RTT, len); + SVCMAP(WMI_10_4_SERVICE_RATECTRL, + WMI_SERVICE_RATECTRL, len); + SVCMAP(WMI_10_4_SERVICE_WOW, + WMI_SERVICE_WOW, len); + SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_RATECTRL_CACHE, len); + SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS, + WMI_SERVICE_IRAM_TIDS, len); + SVCMAP(WMI_10_4_SERVICE_BURST, + WMI_SERVICE_BURST, len); + SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD, + WMI_SERVICE_GTK_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_SCAN_SCH, + WMI_SERVICE_SCAN_SCH, len); + SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD, + WMI_SERVICE_CSA_OFFLOAD, len); + 
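/* Illustrative aside, not part of the patch: each SVCMAP(x, y, len)
 * statement in these map functions expands, per the macro defined
 * earlier in this header, to roughly:
 *
 *     if (WMI_SERVICE_IS_ENABLED(in, x, len) ||
 *         WMI_EXT_SERVICE_IS_ENABLED(in, x, len))
 *             __set_bit(y, out);
 *
 * translating a firmware-ABI-specific bit position x into the
 * driver's unified enum wmi_service bit y.
 */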
SVCMAP(WMI_10_4_SERVICE_CHATTER, + WMI_SERVICE_CHATTER, len); + SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID, + WMI_SERVICE_COEX_FREQAVOID, len); + SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE, + WMI_SERVICE_PACKET_POWER_SAVE, len); + SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_FORCE_FW_HANG, len); + SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_GPIO, + WMI_SERVICE_GPIO, len); + SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len); + SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len); + SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE, + WMI_SERVICE_STA_KEEP_ALIVE, len); + SVCMAP(WMI_10_4_SERVICE_TX_ENCAP, + WMI_SERVICE_TX_ENCAP, len); + SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, + WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len); + SVCMAP(WMI_10_4_SERVICE_EARLY_RX, + WMI_SERVICE_EARLY_RX, len); + SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA, + WMI_SERVICE_ENHANCED_PROXY_STA, len); + SVCMAP(WMI_10_4_SERVICE_TT, + WMI_SERVICE_TT, len); + SVCMAP(WMI_10_4_SERVICE_ATF, + WMI_SERVICE_ATF, len); + SVCMAP(WMI_10_4_SERVICE_PEER_CACHING, + WMI_SERVICE_PEER_CACHING, len); + SVCMAP(WMI_10_4_SERVICE_COEX_GPIO, + WMI_SERVICE_COEX_GPIO, len); + SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF, + WMI_SERVICE_AUX_SPECTRAL_INTF, len); + SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_SERVICE_AUX_CHAN_LOAD_INTF, len); + SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, + WMI_SERVICE_BSS_CHANNEL_INFO_64, len); + SVCMAP(WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_MESH_NON_11S, + WMI_SERVICE_MESH_NON_11S, len); + SVCMAP(WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_SERVICE_RESTRT_CHNL_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_PEER_STATS, + WMI_SERVICE_PEER_STATS, len); + SVCMAP(WMI_10_4_SERVICE_MESH_11S, + WMI_SERVICE_MESH_11S, len); + SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, + WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY, + WMI_SERVICE_TX_MODE_PUSH_ONLY, len); + SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL, + WMI_SERVICE_TX_MODE_PUSH_PULL, len); + SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC, + WMI_SERVICE_TX_MODE_DYNAMIC, len); + SVCMAP(WMI_10_4_SERVICE_VDEV_RX_FILTER, + WMI_SERVICE_VDEV_RX_FILTER, len); + SVCMAP(WMI_10_4_SERVICE_BTCOEX, + WMI_SERVICE_BTCOEX, len); + SVCMAP(WMI_10_4_SERVICE_CHECK_CAL_VERSION, + WMI_SERVICE_CHECK_CAL_VERSION, len); + SVCMAP(WMI_10_4_SERVICE_DBGLOG_WARN2, + WMI_SERVICE_DBGLOG_WARN2, len); + SVCMAP(WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE, + WMI_SERVICE_BTCOEX_DUTY_CYCLE, len); + SVCMAP(WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT, + WMI_SERVICE_4_WIRE_COEX_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_SERVICE_EXTENDED_NSS_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT, + WMI_SERVICE_PROG_GPIO_BAND_SELECT, len); + SVCMAP(WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT, + WMI_SERVICE_SMART_LOGGING_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_TDLS, + WMI_SERVICE_TDLS, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_OFFCHAN, + WMI_SERVICE_TDLS_OFFCHAN, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA, + WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA, + WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY, + WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, 
len); + SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH, + WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len); + SVCMAP(WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len); + SVCMAP(WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT, + WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_TPC_STATS_FINAL, + WMI_SERVICE_TPC_STATS_FINAL, len); + SVCMAP(WMI_10_4_SERVICE_TX_DATA_ACK_RSSI, + WMI_SERVICE_TX_DATA_ACK_RSSI, len); + SVCMAP(WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, + WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, + WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_RTT_RESPONDER_ROLE, + WMI_SERVICE_RTT_RESPONDER_ROLE, len); + SVCMAP(WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT, + WMI_SERVICE_PER_PACKET_SW_ENCRYPT, len); + SVCMAP(WMI_10_4_SERVICE_REPORT_AIRTIME, + WMI_SERVICE_REPORT_AIRTIME, len); + SVCMAP(WMI_10_4_SERVICE_TX_PWR_PER_PEER, + WMI_SERVICE_TX_PWR_PER_PEER, len); + SVCMAP(WMI_10_4_SERVICE_RESET_CHIP, + WMI_SERVICE_RESET_CHIP, len); + SVCMAP(WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT, + WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, + WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, len); +} + +#undef SVCMAP /* 2 word representation of MAC addr */ struct wmi_mac_addr { @@ -198,15 +858,195 @@ struct wmi_mac_addr { } __packed; } __packed; -/* macro to convert MAC address from WMI word format to char array */ -#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \ - (c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \ - (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \ - (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \ - (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \ - (c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \ - (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \ - } while (0) +struct wmi_cmd_map { + u32 init_cmdid; + u32 start_scan_cmdid; + u32 stop_scan_cmdid; + u32 scan_chan_list_cmdid; + u32 scan_sch_prio_tbl_cmdid; + u32 scan_prob_req_oui_cmdid; + u32 pdev_set_regdomain_cmdid; + u32 pdev_set_channel_cmdid; + u32 pdev_set_param_cmdid; + u32 pdev_pktlog_enable_cmdid; + u32 pdev_pktlog_disable_cmdid; + u32 pdev_set_wmm_params_cmdid; + u32 pdev_set_ht_cap_ie_cmdid; + u32 pdev_set_vht_cap_ie_cmdid; + u32 pdev_set_dscp_tid_map_cmdid; + u32 pdev_set_quiet_mode_cmdid; + u32 pdev_green_ap_ps_enable_cmdid; + u32 pdev_get_tpc_config_cmdid; + u32 pdev_set_base_macaddr_cmdid; + u32 vdev_create_cmdid; + u32 vdev_delete_cmdid; + u32 vdev_start_request_cmdid; + u32 vdev_restart_request_cmdid; + u32 vdev_up_cmdid; + u32 vdev_stop_cmdid; + u32 vdev_down_cmdid; + u32 vdev_set_param_cmdid; + u32 vdev_install_key_cmdid; + u32 peer_create_cmdid; + u32 peer_delete_cmdid; + u32 peer_flush_tids_cmdid; + u32 peer_set_param_cmdid; + u32 peer_assoc_cmdid; + u32 peer_add_wds_entry_cmdid; + u32 peer_remove_wds_entry_cmdid; + u32 peer_mcast_group_cmdid; + u32 bcn_tx_cmdid; + u32 pdev_send_bcn_cmdid; + u32 bcn_tmpl_cmdid; + u32 bcn_filter_rx_cmdid; + u32 prb_req_filter_rx_cmdid; + u32 mgmt_tx_cmdid; + u32 mgmt_tx_send_cmdid; + u32 prb_tmpl_cmdid; + u32 addba_clear_resp_cmdid; + u32 addba_send_cmdid; + u32 addba_status_cmdid; + u32 delba_send_cmdid; + u32 addba_set_resp_cmdid; + u32 send_singleamsdu_cmdid; + u32 sta_powersave_mode_cmdid; + u32 sta_powersave_param_cmdid; + u32 sta_mimo_ps_mode_cmdid; + u32 pdev_dfs_enable_cmdid; + u32 pdev_dfs_disable_cmdid; + 
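/* The members above and below follow one pattern: a u32 slot per
 * abstract command, which each firmware branch fills with its own
 * numeric ID, or with WMI_CMD_UNSUPPORTED (defined further down) when
 * the branch lacks that command. A hedged usage sketch, assuming the
 * active table is reachable as ar->wmi.cmd as elsewhere in ath10k:
 *
 *	u32 cmd_id = ar->wmi.cmd->force_fw_hang_cmdid;
 *
 *	if (cmd_id == WMI_CMD_UNSUPPORTED)
 *		return -EOPNOTSUPP;
 *	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
 */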
u32 roam_scan_mode; + u32 roam_scan_rssi_threshold; + u32 roam_scan_period; + u32 roam_scan_rssi_change_threshold; + u32 roam_ap_profile; + u32 ofl_scan_add_ap_profile; + u32 ofl_scan_remove_ap_profile; + u32 ofl_scan_period; + u32 p2p_dev_set_device_info; + u32 p2p_dev_set_discoverability; + u32 p2p_go_set_beacon_ie; + u32 p2p_go_set_probe_resp_ie; + u32 p2p_set_vendor_ie_data_cmdid; + u32 ap_ps_peer_param_cmdid; + u32 ap_ps_peer_uapsd_coex_cmdid; + u32 peer_rate_retry_sched_cmdid; + u32 wlan_profile_trigger_cmdid; + u32 wlan_profile_set_hist_intvl_cmdid; + u32 wlan_profile_get_profile_data_cmdid; + u32 wlan_profile_enable_profile_id_cmdid; + u32 wlan_profile_list_profile_id_cmdid; + u32 pdev_suspend_cmdid; + u32 pdev_resume_cmdid; + u32 add_bcn_filter_cmdid; + u32 rmv_bcn_filter_cmdid; + u32 wow_add_wake_pattern_cmdid; + u32 wow_del_wake_pattern_cmdid; + u32 wow_enable_disable_wake_event_cmdid; + u32 wow_enable_cmdid; + u32 wow_hostwakeup_from_sleep_cmdid; + u32 rtt_measreq_cmdid; + u32 rtt_tsf_cmdid; + u32 vdev_spectral_scan_configure_cmdid; + u32 vdev_spectral_scan_enable_cmdid; + u32 request_stats_cmdid; + u32 request_peer_stats_info_cmdid; + u32 set_arp_ns_offload_cmdid; + u32 network_list_offload_config_cmdid; + u32 gtk_offload_cmdid; + u32 csa_offload_enable_cmdid; + u32 csa_offload_chanswitch_cmdid; + u32 chatter_set_mode_cmdid; + u32 peer_tid_addba_cmdid; + u32 peer_tid_delba_cmdid; + u32 sta_dtim_ps_method_cmdid; + u32 sta_uapsd_auto_trig_cmdid; + u32 sta_keepalive_cmd; + u32 echo_cmdid; + u32 pdev_utf_cmdid; + u32 dbglog_cfg_cmdid; + u32 pdev_qvit_cmdid; + u32 pdev_ftm_intg_cmdid; + u32 vdev_set_keepalive_cmdid; + u32 vdev_get_keepalive_cmdid; + u32 force_fw_hang_cmdid; + u32 gpio_config_cmdid; + u32 gpio_output_cmdid; + u32 pdev_get_temperature_cmdid; + u32 vdev_set_wmm_params_cmdid; + u32 tdls_set_state_cmdid; + u32 tdls_peer_update_cmdid; + u32 adaptive_qcs_cmdid; + u32 scan_update_request_cmdid; + u32 vdev_standby_response_cmdid; + u32 vdev_resume_response_cmdid; + u32 wlan_peer_caching_add_peer_cmdid; + u32 wlan_peer_caching_evict_peer_cmdid; + u32 wlan_peer_caching_restore_peer_cmdid; + u32 wlan_peer_caching_print_all_peers_info_cmdid; + u32 peer_update_wds_entry_cmdid; + u32 peer_add_proxy_sta_entry_cmdid; + u32 rtt_keepalive_cmdid; + u32 oem_req_cmdid; + u32 nan_cmdid; + u32 vdev_ratemask_cmdid; + u32 qboost_cfg_cmdid; + u32 pdev_smart_ant_enable_cmdid; + u32 pdev_smart_ant_set_rx_antenna_cmdid; + u32 peer_smart_ant_set_tx_antenna_cmdid; + u32 peer_smart_ant_set_train_info_cmdid; + u32 peer_smart_ant_set_node_config_ops_cmdid; + u32 pdev_set_antenna_switch_table_cmdid; + u32 pdev_set_ctl_table_cmdid; + u32 pdev_set_mimogain_table_cmdid; + u32 pdev_ratepwr_table_cmdid; + u32 pdev_ratepwr_chainmsk_table_cmdid; + u32 pdev_fips_cmdid; + u32 tt_set_conf_cmdid; + u32 fwtest_cmdid; + u32 vdev_atf_request_cmdid; + u32 peer_atf_request_cmdid; + u32 pdev_get_ani_cck_config_cmdid; + u32 pdev_get_ani_ofdm_config_cmdid; + u32 pdev_reserve_ast_entry_cmdid; + u32 pdev_get_nfcal_power_cmdid; + u32 pdev_get_tpc_cmdid; + u32 pdev_get_ast_info_cmdid; + u32 vdev_set_dscp_tid_map_cmdid; + u32 pdev_get_info_cmdid; + u32 vdev_get_info_cmdid; + u32 vdev_filter_neighbor_rx_packets_cmdid; + u32 mu_cal_start_cmdid; + u32 set_cca_params_cmdid; + u32 pdev_bss_chan_info_request_cmdid; + u32 pdev_enable_adaptive_cca_cmdid; + u32 ext_resource_cfg_cmdid; + u32 vdev_set_ie_cmdid; + u32 set_lteu_config_cmdid; + u32 atf_ssid_grouping_request_cmdid; + u32 peer_atf_ext_request_cmdid; + u32 
set_periodic_channel_stats_cfg_cmdid; + u32 peer_bwf_request_cmdid; + u32 btcoex_cfg_cmdid; + u32 peer_tx_mu_txmit_count_cmdid; + u32 peer_tx_mu_txmit_rstcnt_cmdid; + u32 peer_gid_userpos_list_cmdid; + u32 pdev_check_cal_version_cmdid; + u32 coex_version_cfg_cmid; + u32 pdev_get_rx_filter_cmdid; + u32 pdev_extended_nss_cfg_cmdid; + u32 vdev_set_scan_nac_rssi_cmdid; + u32 prog_gpio_band_select_cmdid; + u32 config_smart_logging_cmdid; + u32 debug_fatal_condition_cmdid; + u32 get_tsf_timer_cmdid; + u32 pdev_get_tpc_table_cmdid; + u32 vdev_sifs_trigger_time_cmdid; + u32 pdev_wds_entry_list_cmdid; + u32 tdls_set_offchan_mode_cmdid; + u32 radar_found_cmdid; + u32 set_bb_timing_cmdid; + u32 per_peer_per_tid_config_cmdid; +}; /* * wmi command groups. @@ -247,7 +1087,9 @@ enum wmi_cmd_group { #define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1) #define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1) -/* Command IDs and commande events. */ +#define WMI_CMD_UNSUPPORTED 0 + +/* Command IDs and command events for MAIN FW. */ enum wmi_cmd_id { WMI_INIT_CMDID = 0x1, @@ -405,7 +1247,8 @@ enum wmi_cmd_id { WMI_STA_UAPSD_AUTO_TRIG_CMDID, /* STA Keep alive parameter configuration, - Requires WMI_SERVICE_STA_KEEP_ALIVE */ + * Requires WMI_SERVICE_STA_KEEP_ALIVE + */ WMI_STA_KEEPALIVE_CMD, /* misc command group */ @@ -416,6 +1259,7 @@ enum wmi_cmd_id { WMI_PDEV_FTM_INTG_CMDID, WMI_VDEV_SET_KEEPALIVE_CMDID, WMI_VDEV_GET_KEEPALIVE_CMDID, + WMI_FORCE_FW_HANG_CMDID, /* GPIO Configuration */ WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO), @@ -425,6 +1269,7 @@ enum wmi_cmd_id { enum wmi_event_id { WMI_SERVICE_READY_EVENTID = 0x1, WMI_READY_EVENTID, + WMI_SERVICE_AVAILABLE_EVENTID, /* Scan specific events */ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN), @@ -487,6 +1332,644 @@ enum wmi_event_id { WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO), }; +/* Command IDs and command events for 10.X firmware */ +enum wmi_10x_cmd_id { + WMI_10X_START_CMDID = 0x9000, + WMI_10X_END_CMDID = 0x9FFF, + + /* initialize the wlan sub system */ + WMI_10X_INIT_CMDID, + + /* Scan specific commands */ + + WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID, + WMI_10X_STOP_SCAN_CMDID, + WMI_10X_SCAN_CHAN_LIST_CMDID, + WMI_10X_ECHO_CMDID, + + /* PDEV(physical device) specific commands */ + WMI_10X_PDEV_SET_REGDOMAIN_CMDID, + WMI_10X_PDEV_SET_CHANNEL_CMDID, + WMI_10X_PDEV_SET_PARAM_CMDID, + WMI_10X_PDEV_PKTLOG_ENABLE_CMDID, + WMI_10X_PDEV_PKTLOG_DISABLE_CMDID, + WMI_10X_PDEV_SET_WMM_PARAMS_CMDID, + WMI_10X_PDEV_SET_HT_CAP_IE_CMDID, + WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID, + WMI_10X_PDEV_SET_BASE_MACADDR_CMDID, + WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10X_PDEV_SET_QUIET_MODE_CMDID, + WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID, + WMI_10X_PDEV_GET_TPC_CONFIG_CMDID, + + /* VDEV(virtual device) specific commands */ + WMI_10X_VDEV_CREATE_CMDID, + WMI_10X_VDEV_DELETE_CMDID, + WMI_10X_VDEV_START_REQUEST_CMDID, + WMI_10X_VDEV_RESTART_REQUEST_CMDID, + WMI_10X_VDEV_UP_CMDID, + WMI_10X_VDEV_STOP_CMDID, + WMI_10X_VDEV_DOWN_CMDID, + WMI_10X_VDEV_STANDBY_RESPONSE_CMDID, + WMI_10X_VDEV_RESUME_RESPONSE_CMDID, + WMI_10X_VDEV_SET_PARAM_CMDID, + WMI_10X_VDEV_INSTALL_KEY_CMDID, + + /* peer specific commands */ + WMI_10X_PEER_CREATE_CMDID, + WMI_10X_PEER_DELETE_CMDID, + WMI_10X_PEER_FLUSH_TIDS_CMDID, + WMI_10X_PEER_SET_PARAM_CMDID, + WMI_10X_PEER_ASSOC_CMDID, + WMI_10X_PEER_ADD_WDS_ENTRY_CMDID, + WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID, + WMI_10X_PEER_MCAST_GROUP_CMDID, + + /* beacon/management specific commands */ + + WMI_10X_BCN_TX_CMDID, + 
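/* Plain C enumerator arithmetic applies throughout these per-branch
 * blocks: every uninitialized enumerator is the previous value plus
 * one, so WMI_10X_STOP_SCAN_CMDID is 0x9001 and WMI_10X_INIT_CMDID,
 * declared right after WMI_10X_END_CMDID above, evaluates to 0xa000,
 * just past the 0x9000..0x9FFF window. Only WMI_10X_PDEV_UTF_CMDID is
 * pinned explicitly (to WMI_10X_END_CMDID - 1, see below), presumably
 * so it keeps a stable value as commands are appended. A compile-time
 * check one could drop into an init function (sketch only):
 *
 *	BUILD_BUG_ON(WMI_10X_INIT_CMDID != 0xa000);
 */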
WMI_10X_BCN_PRB_TMPL_CMDID, + WMI_10X_BCN_FILTER_RX_CMDID, + WMI_10X_PRB_REQ_FILTER_RX_CMDID, + WMI_10X_MGMT_TX_CMDID, + + /* commands to control ba negotiation directly from host. */ + WMI_10X_ADDBA_CLEAR_RESP_CMDID, + WMI_10X_ADDBA_SEND_CMDID, + WMI_10X_ADDBA_STATUS_CMDID, + WMI_10X_DELBA_SEND_CMDID, + WMI_10X_ADDBA_SET_RESP_CMDID, + WMI_10X_SEND_SINGLEAMSDU_CMDID, + + /* Station power save specific config */ + WMI_10X_STA_POWERSAVE_MODE_CMDID, + WMI_10X_STA_POWERSAVE_PARAM_CMDID, + WMI_10X_STA_MIMO_PS_MODE_CMDID, + + /* set debug log config */ + WMI_10X_DBGLOG_CFG_CMDID, + + /* DFS-specific commands */ + WMI_10X_PDEV_DFS_ENABLE_CMDID, + WMI_10X_PDEV_DFS_DISABLE_CMDID, + + /* QVIT specific command id */ + WMI_10X_PDEV_QVIT_CMDID, + + /* Offload Scan and Roaming related commands */ + WMI_10X_ROAM_SCAN_MODE, + WMI_10X_ROAM_SCAN_RSSI_THRESHOLD, + WMI_10X_ROAM_SCAN_PERIOD, + WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + WMI_10X_ROAM_AP_PROFILE, + WMI_10X_OFL_SCAN_ADD_AP_PROFILE, + WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE, + WMI_10X_OFL_SCAN_PERIOD, + + /* P2P specific commands */ + WMI_10X_P2P_DEV_SET_DEVICE_INFO, + WMI_10X_P2P_DEV_SET_DISCOVERABILITY, + WMI_10X_P2P_GO_SET_BEACON_IE, + WMI_10X_P2P_GO_SET_PROBE_RESP_IE, + + /* AP power save specific config */ + WMI_10X_AP_PS_PEER_PARAM_CMDID, + WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID, + + /* Rate-control specific commands */ + WMI_10X_PEER_RATE_RETRY_SCHED_CMDID, + + /* WLAN Profiling commands. */ + WMI_10X_WLAN_PROFILE_TRIGGER_CMDID, + WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + + /* Suspend resume command Ids */ + WMI_10X_PDEV_SUSPEND_CMDID, + WMI_10X_PDEV_RESUME_CMDID, + + /* Beacon filter commands */ + WMI_10X_ADD_BCN_FILTER_CMDID, + WMI_10X_RMV_BCN_FILTER_CMDID, + + /* WOW specific WMI commands */ + WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID, + WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID, + WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + WMI_10X_WOW_ENABLE_CMDID, + WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + + /* RTT measurement related cmd */ + WMI_10X_RTT_MEASREQ_CMDID, + WMI_10X_RTT_TSF_CMDID, + + /* transmit beacon by value */ + WMI_10X_PDEV_SEND_BCN_CMDID, + + /* F/W stats */ + WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + WMI_10X_REQUEST_STATS_CMDID, + + /* GPIO Configuration */ + WMI_10X_GPIO_CONFIG_CMDID, + WMI_10X_GPIO_OUTPUT_CMDID, + + WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1, +}; + +enum wmi_10x_event_id { + WMI_10X_SERVICE_READY_EVENTID = 0x8000, + WMI_10X_READY_EVENTID, + WMI_10X_START_EVENTID = 0x9000, + WMI_10X_END_EVENTID = 0x9FFF, + + /* Scan specific events */ + WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID, + WMI_10X_ECHO_EVENTID, + WMI_10X_DEBUG_MESG_EVENTID, + WMI_10X_UPDATE_STATS_EVENTID, + + /* Instantaneous RSSI event */ + WMI_10X_INST_RSSI_STATS_EVENTID, + + /* VDEV specific events */ + WMI_10X_VDEV_START_RESP_EVENTID, + WMI_10X_VDEV_STANDBY_REQ_EVENTID, + WMI_10X_VDEV_RESUME_REQ_EVENTID, + WMI_10X_VDEV_STOPPED_EVENTID, + + /* peer specific events */ + WMI_10X_PEER_STA_KICKOUT_EVENTID, + + /* beacon/mgmt specific events */ + WMI_10X_HOST_SWBA_EVENTID, + WMI_10X_TBTTOFFSET_UPDATE_EVENTID, + WMI_10X_MGMT_RX_EVENTID, + + /* Channel stats event */ + WMI_10X_CHAN_INFO_EVENTID, + + /* PHY Error specific WMI event */ + WMI_10X_PHYERR_EVENTID, + + /* Roam event to trigger roaming on host */ + WMI_10X_ROAM_EVENTID, + + /* matching AP found from list of
profiles */ + WMI_10X_PROFILE_MATCH, + + /* debug print message used for tracing FW code while debugging */ + WMI_10X_DEBUG_PRINT_EVENTID, + /* VI specific event */ + WMI_10X_PDEV_QVIT_EVENTID, + /* FW code profile data in response to profile request */ + WMI_10X_WLAN_PROFILE_DATA_EVENTID, + + /* RTT related event IDs */ + WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID, + WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID, + WMI_10X_RTT_ERROR_REPORT_EVENTID, + + WMI_10X_WOW_WAKEUP_HOST_EVENTID, + WMI_10X_DCS_INTERFERENCE_EVENTID, + + /* TPC config for the current operating channel */ + WMI_10X_PDEV_TPC_CONFIG_EVENTID, + + WMI_10X_GPIO_INPUT_EVENTID, + WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1, +}; + +enum wmi_10_2_cmd_id { + WMI_10_2_START_CMDID = 0x9000, + WMI_10_2_END_CMDID = 0x9FFF, + WMI_10_2_INIT_CMDID, + WMI_10_2_START_SCAN_CMDID = WMI_10_2_START_CMDID, + WMI_10_2_STOP_SCAN_CMDID, + WMI_10_2_SCAN_CHAN_LIST_CMDID, + WMI_10_2_ECHO_CMDID, + WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, + WMI_10_2_PDEV_SET_CHANNEL_CMDID, + WMI_10_2_PDEV_SET_PARAM_CMDID, + WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID, + WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID, + WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID, + WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID, + WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID, + WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID, + WMI_10_2_PDEV_SET_QUIET_MODE_CMDID, + WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID, + WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID, + WMI_10_2_VDEV_CREATE_CMDID, + WMI_10_2_VDEV_DELETE_CMDID, + WMI_10_2_VDEV_START_REQUEST_CMDID, + WMI_10_2_VDEV_RESTART_REQUEST_CMDID, + WMI_10_2_VDEV_UP_CMDID, + WMI_10_2_VDEV_STOP_CMDID, + WMI_10_2_VDEV_DOWN_CMDID, + WMI_10_2_VDEV_STANDBY_RESPONSE_CMDID, + WMI_10_2_VDEV_RESUME_RESPONSE_CMDID, + WMI_10_2_VDEV_SET_PARAM_CMDID, + WMI_10_2_VDEV_INSTALL_KEY_CMDID, + WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10_2_PEER_CREATE_CMDID, + WMI_10_2_PEER_DELETE_CMDID, + WMI_10_2_PEER_FLUSH_TIDS_CMDID, + WMI_10_2_PEER_SET_PARAM_CMDID, + WMI_10_2_PEER_ASSOC_CMDID, + WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID, + WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID, + WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID, + WMI_10_2_PEER_MCAST_GROUP_CMDID, + WMI_10_2_BCN_TX_CMDID, + WMI_10_2_BCN_PRB_TMPL_CMDID, + WMI_10_2_BCN_FILTER_RX_CMDID, + WMI_10_2_PRB_REQ_FILTER_RX_CMDID, + WMI_10_2_MGMT_TX_CMDID, + WMI_10_2_ADDBA_CLEAR_RESP_CMDID, + WMI_10_2_ADDBA_SEND_CMDID, + WMI_10_2_ADDBA_STATUS_CMDID, + WMI_10_2_DELBA_SEND_CMDID, + WMI_10_2_ADDBA_SET_RESP_CMDID, + WMI_10_2_SEND_SINGLEAMSDU_CMDID, + WMI_10_2_STA_POWERSAVE_MODE_CMDID, + WMI_10_2_STA_POWERSAVE_PARAM_CMDID, + WMI_10_2_STA_MIMO_PS_MODE_CMDID, + WMI_10_2_DBGLOG_CFG_CMDID, + WMI_10_2_PDEV_DFS_ENABLE_CMDID, + WMI_10_2_PDEV_DFS_DISABLE_CMDID, + WMI_10_2_PDEV_QVIT_CMDID, + WMI_10_2_ROAM_SCAN_MODE, + WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD, + WMI_10_2_ROAM_SCAN_PERIOD, + WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + WMI_10_2_ROAM_AP_PROFILE, + WMI_10_2_OFL_SCAN_ADD_AP_PROFILE, + WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE, + WMI_10_2_OFL_SCAN_PERIOD, + WMI_10_2_P2P_DEV_SET_DEVICE_INFO, + WMI_10_2_P2P_DEV_SET_DISCOVERABILITY, + WMI_10_2_P2P_GO_SET_BEACON_IE, + WMI_10_2_P2P_GO_SET_PROBE_RESP_IE, + WMI_10_2_AP_PS_PEER_PARAM_CMDID, + WMI_10_2_AP_PS_PEER_UAPSD_COEX_CMDID, + WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID, + WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID, + WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + WMI_10_2_PDEV_SUSPEND_CMDID, + WMI_10_2_PDEV_RESUME_CMDID, + WMI_10_2_ADD_BCN_FILTER_CMDID, +
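/* Because IDs are assigned positionally, 10.2 cannot reuse the 10.x
 * enum even though most names match: relative to 10.x it drops
 * PDEV_SET_DSCP_TID_MAP from the pdev group and inserts
 * WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID after VDEV_INSTALL_KEY and
 * WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID into the peer group, so
 * same-named commands can land on different values; for example,
 * WMI_10_2_PDEV_SET_QUIET_MODE_CMDID works out to 0x900d while
 * WMI_10X_PDEV_SET_QUIET_MODE_CMDID is 0x900e. This positional drift
 * is what the wmi_cmd_map translation table earlier in this header
 * absorbs.
 */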
WMI_10_2_RMV_BCN_FILTER_CMDID, + WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID, + WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID, + WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + WMI_10_2_WOW_ENABLE_CMDID, + WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + WMI_10_2_RTT_MEASREQ_CMDID, + WMI_10_2_RTT_TSF_CMDID, + WMI_10_2_RTT_KEEPALIVE_CMDID, + WMI_10_2_PDEV_SEND_BCN_CMDID, + WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + WMI_10_2_REQUEST_STATS_CMDID, + WMI_10_2_GPIO_CONFIG_CMDID, + WMI_10_2_GPIO_OUTPUT_CMDID, + WMI_10_2_VDEV_RATEMASK_CMDID, + WMI_10_2_PDEV_SMART_ANT_ENABLE_CMDID, + WMI_10_2_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, + WMI_10_2_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID, + WMI_10_2_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID, + WMI_10_2_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID, + WMI_10_2_FORCE_FW_HANG_CMDID, + WMI_10_2_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, + WMI_10_2_PDEV_SET_CTL_TABLE_CMDID, + WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID, + WMI_10_2_PDEV_RATEPWR_TABLE_CMDID, + WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID, + WMI_10_2_PDEV_GET_INFO, + WMI_10_2_VDEV_GET_INFO, + WMI_10_2_VDEV_ATF_REQUEST_CMDID, + WMI_10_2_PEER_ATF_REQUEST_CMDID, + WMI_10_2_PDEV_GET_TEMPERATURE_CMDID, + WMI_10_2_MU_CAL_START_CMDID, + WMI_10_2_SET_LTEU_CONFIG_CMDID, + WMI_10_2_SET_CCA_PARAMS, + WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + WMI_10_2_FWTEST_CMDID, + WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID, + WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1, +}; + +enum wmi_10_2_event_id { + WMI_10_2_SERVICE_READY_EVENTID = 0x8000, + WMI_10_2_READY_EVENTID, + WMI_10_2_DEBUG_MESG_EVENTID, + WMI_10_2_START_EVENTID = 0x9000, + WMI_10_2_END_EVENTID = 0x9FFF, + WMI_10_2_SCAN_EVENTID = WMI_10_2_START_EVENTID, + WMI_10_2_ECHO_EVENTID, + WMI_10_2_UPDATE_STATS_EVENTID, + WMI_10_2_INST_RSSI_STATS_EVENTID, + WMI_10_2_VDEV_START_RESP_EVENTID, + WMI_10_2_VDEV_STANDBY_REQ_EVENTID, + WMI_10_2_VDEV_RESUME_REQ_EVENTID, + WMI_10_2_VDEV_STOPPED_EVENTID, + WMI_10_2_PEER_STA_KICKOUT_EVENTID, + WMI_10_2_HOST_SWBA_EVENTID, + WMI_10_2_TBTTOFFSET_UPDATE_EVENTID, + WMI_10_2_MGMT_RX_EVENTID, + WMI_10_2_CHAN_INFO_EVENTID, + WMI_10_2_PHYERR_EVENTID, + WMI_10_2_ROAM_EVENTID, + WMI_10_2_PROFILE_MATCH, + WMI_10_2_DEBUG_PRINT_EVENTID, + WMI_10_2_PDEV_QVIT_EVENTID, + WMI_10_2_WLAN_PROFILE_DATA_EVENTID, + WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID, + WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID, + WMI_10_2_RTT_ERROR_REPORT_EVENTID, + WMI_10_2_RTT_KEEPALIVE_EVENTID, + WMI_10_2_WOW_WAKEUP_HOST_EVENTID, + WMI_10_2_DCS_INTERFERENCE_EVENTID, + WMI_10_2_PDEV_TPC_CONFIG_EVENTID, + WMI_10_2_GPIO_INPUT_EVENTID, + WMI_10_2_PEER_RATECODE_LIST_EVENTID, + WMI_10_2_GENERIC_BUFFER_EVENTID, + WMI_10_2_MCAST_BUF_RELEASE_EVENTID, + WMI_10_2_MCAST_LIST_AGEOUT_EVENTID, + WMI_10_2_WDS_PEER_EVENTID, + WMI_10_2_PEER_STA_PS_STATECHG_EVENTID, + WMI_10_2_PDEV_TEMPERATURE_EVENTID, + WMI_10_2_MU_REPORT_EVENTID, + WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID, + WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1, +}; + +enum wmi_10_4_cmd_id { + WMI_10_4_START_CMDID = 0x9000, + WMI_10_4_END_CMDID = 0x9FFF, + WMI_10_4_INIT_CMDID, + WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID, + WMI_10_4_STOP_SCAN_CMDID, + WMI_10_4_SCAN_CHAN_LIST_CMDID, + WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID, + WMI_10_4_SCAN_UPDATE_REQUEST_CMDID, + WMI_10_4_ECHO_CMDID, + WMI_10_4_PDEV_SET_REGDOMAIN_CMDID, + WMI_10_4_PDEV_SET_CHANNEL_CMDID, + WMI_10_4_PDEV_SET_PARAM_CMDID, + WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID, + WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID, + WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID, + WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID, + 
WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID, + WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID, + WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10_4_PDEV_SET_QUIET_MODE_CMDID, + WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID, + WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID, + WMI_10_4_VDEV_CREATE_CMDID, + WMI_10_4_VDEV_DELETE_CMDID, + WMI_10_4_VDEV_START_REQUEST_CMDID, + WMI_10_4_VDEV_RESTART_REQUEST_CMDID, + WMI_10_4_VDEV_UP_CMDID, + WMI_10_4_VDEV_STOP_CMDID, + WMI_10_4_VDEV_DOWN_CMDID, + WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID, + WMI_10_4_VDEV_RESUME_RESPONSE_CMDID, + WMI_10_4_VDEV_SET_PARAM_CMDID, + WMI_10_4_VDEV_INSTALL_KEY_CMDID, + WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID, + WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID, + WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID, + WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID, + WMI_10_4_PEER_CREATE_CMDID, + WMI_10_4_PEER_DELETE_CMDID, + WMI_10_4_PEER_FLUSH_TIDS_CMDID, + WMI_10_4_PEER_SET_PARAM_CMDID, + WMI_10_4_PEER_ASSOC_CMDID, + WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID, + WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID, + WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID, + WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID, + WMI_10_4_PEER_MCAST_GROUP_CMDID, + WMI_10_4_BCN_TX_CMDID, + WMI_10_4_PDEV_SEND_BCN_CMDID, + WMI_10_4_BCN_PRB_TMPL_CMDID, + WMI_10_4_BCN_FILTER_RX_CMDID, + WMI_10_4_PRB_REQ_FILTER_RX_CMDID, + WMI_10_4_MGMT_TX_CMDID, + WMI_10_4_PRB_TMPL_CMDID, + WMI_10_4_ADDBA_CLEAR_RESP_CMDID, + WMI_10_4_ADDBA_SEND_CMDID, + WMI_10_4_ADDBA_STATUS_CMDID, + WMI_10_4_DELBA_SEND_CMDID, + WMI_10_4_ADDBA_SET_RESP_CMDID, + WMI_10_4_SEND_SINGLEAMSDU_CMDID, + WMI_10_4_STA_POWERSAVE_MODE_CMDID, + WMI_10_4_STA_POWERSAVE_PARAM_CMDID, + WMI_10_4_STA_MIMO_PS_MODE_CMDID, + WMI_10_4_DBGLOG_CFG_CMDID, + WMI_10_4_PDEV_DFS_ENABLE_CMDID, + WMI_10_4_PDEV_DFS_DISABLE_CMDID, + WMI_10_4_PDEV_QVIT_CMDID, + WMI_10_4_ROAM_SCAN_MODE, + WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD, + WMI_10_4_ROAM_SCAN_PERIOD, + WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + WMI_10_4_ROAM_AP_PROFILE, + WMI_10_4_OFL_SCAN_ADD_AP_PROFILE, + WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE, + WMI_10_4_OFL_SCAN_PERIOD, + WMI_10_4_P2P_DEV_SET_DEVICE_INFO, + WMI_10_4_P2P_DEV_SET_DISCOVERABILITY, + WMI_10_4_P2P_GO_SET_BEACON_IE, + WMI_10_4_P2P_GO_SET_PROBE_RESP_IE, + WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID, + WMI_10_4_AP_PS_PEER_PARAM_CMDID, + WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID, + WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID, + WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID, + WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + WMI_10_4_PDEV_SUSPEND_CMDID, + WMI_10_4_PDEV_RESUME_CMDID, + WMI_10_4_ADD_BCN_FILTER_CMDID, + WMI_10_4_RMV_BCN_FILTER_CMDID, + WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID, + WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID, + WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + WMI_10_4_WOW_ENABLE_CMDID, + WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + WMI_10_4_RTT_MEASREQ_CMDID, + WMI_10_4_RTT_TSF_CMDID, + WMI_10_4_RTT_KEEPALIVE_CMDID, + WMI_10_4_OEM_REQ_CMDID, + WMI_10_4_NAN_CMDID, + WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + WMI_10_4_REQUEST_STATS_CMDID, + WMI_10_4_GPIO_CONFIG_CMDID, + WMI_10_4_GPIO_OUTPUT_CMDID, + WMI_10_4_VDEV_RATEMASK_CMDID, + WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID, + WMI_10_4_GTK_OFFLOAD_CMDID, + WMI_10_4_QBOOST_CFG_CMDID, + WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID, + WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID, + WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, + WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID, + 
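/* Branch selection sketch (hedged): at attach time the driver is
 * expected to point its translation tables at the 10.4 variants,
 * roughly as ath10k_wmi_attach() in wmi.c does:
 *
 *	case ATH10K_FW_WMI_OP_VERSION_10_4:
 *		ar->wmi.cmd = &wmi_10_4_cmd_map;
 *		ar->wmi.ops = &wmi_10_4_ops;
 *		break;
 */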
WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID, + WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID, + WMI_10_4_VDEV_SET_KEEPALIVE_CMDID, + WMI_10_4_VDEV_GET_KEEPALIVE_CMDID, + WMI_10_4_FORCE_FW_HANG_CMDID, + WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, + WMI_10_4_PDEV_SET_CTL_TABLE_CMDID, + WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID, + WMI_10_4_PDEV_RATEPWR_TABLE_CMDID, + WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID, + WMI_10_4_PDEV_FIPS_CMDID, + WMI_10_4_TT_SET_CONF_CMDID, + WMI_10_4_FWTEST_CMDID, + WMI_10_4_VDEV_ATF_REQUEST_CMDID, + WMI_10_4_PEER_ATF_REQUEST_CMDID, + WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID, + WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID, + WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID, + WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID, + WMI_10_4_PDEV_GET_TPC_CMDID, + WMI_10_4_PDEV_GET_AST_INFO_CMDID, + WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10_4_PDEV_GET_TEMPERATURE_CMDID, + WMI_10_4_PDEV_GET_INFO_CMDID, + WMI_10_4_VDEV_GET_INFO_CMDID, + WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID, + WMI_10_4_MU_CAL_START_CMDID, + WMI_10_4_SET_CCA_PARAMS_CMDID, + WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + WMI_10_4_EXT_RESOURCE_CFG_CMDID, + WMI_10_4_VDEV_SET_IE_CMDID, + WMI_10_4_SET_LTEU_CONFIG_CMDID, + WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID, + WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID, + WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG, + WMI_10_4_PEER_BWF_REQUEST_CMDID, + WMI_10_4_BTCOEX_CFG_CMDID, + WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID, + WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID, + WMI_10_4_PEER_GID_USERPOS_LIST_CMDID, + WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID, + WMI_10_4_COEX_VERSION_CFG_CMID, + WMI_10_4_PDEV_GET_RX_FILTER_CMDID, + WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID, + WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID, + WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID, + WMI_10_4_CONFIG_SMART_LOGGING_CMDID, + WMI_10_4_DEBUG_FATAL_CONDITION_CMDID, + WMI_10_4_GET_TSF_TIMER_CMDID, + WMI_10_4_PDEV_GET_TPC_TABLE_CMDID, + WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID, + WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID, + WMI_10_4_TDLS_SET_STATE_CMDID, + WMI_10_4_TDLS_PEER_UPDATE_CMDID, + WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID, + WMI_10_4_PDEV_SEND_FD_CMDID, + WMI_10_4_ENABLE_FILS_CMDID, + WMI_10_4_PDEV_SET_BRIDGE_MACADDR_CMDID, + WMI_10_4_ATF_GROUP_WMM_AC_CONFIG_REQUEST_CMDID, + WMI_10_4_RADAR_FOUND_CMDID, + WMI_10_4_PEER_CFR_CAPTURE_CMDID, + WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID, + WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1, +}; + +enum wmi_10_4_event_id { + WMI_10_4_SERVICE_READY_EVENTID = 0x8000, + WMI_10_4_READY_EVENTID, + WMI_10_4_DEBUG_MESG_EVENTID, + WMI_10_4_START_EVENTID = 0x9000, + WMI_10_4_END_EVENTID = 0x9FFF, + WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID, + WMI_10_4_ECHO_EVENTID, + WMI_10_4_UPDATE_STATS_EVENTID, + WMI_10_4_INST_RSSI_STATS_EVENTID, + WMI_10_4_VDEV_START_RESP_EVENTID, + WMI_10_4_VDEV_STANDBY_REQ_EVENTID, + WMI_10_4_VDEV_RESUME_REQ_EVENTID, + WMI_10_4_VDEV_STOPPED_EVENTID, + WMI_10_4_PEER_STA_KICKOUT_EVENTID, + WMI_10_4_HOST_SWBA_EVENTID, + WMI_10_4_TBTTOFFSET_UPDATE_EVENTID, + WMI_10_4_MGMT_RX_EVENTID, + WMI_10_4_CHAN_INFO_EVENTID, + WMI_10_4_PHYERR_EVENTID, + WMI_10_4_ROAM_EVENTID, + WMI_10_4_PROFILE_MATCH, + WMI_10_4_DEBUG_PRINT_EVENTID, + WMI_10_4_PDEV_QVIT_EVENTID, + WMI_10_4_WLAN_PROFILE_DATA_EVENTID, + WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID, + WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID, + WMI_10_4_RTT_ERROR_REPORT_EVENTID, + WMI_10_4_RTT_KEEPALIVE_EVENTID, + WMI_10_4_OEM_CAPABILITY_EVENTID, + WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID, + WMI_10_4_OEM_ERROR_REPORT_EVENTID, + WMI_10_4_NAN_EVENTID, + 
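/* Rx dispatch sketch (hedged): the per-branch event handler pulls the
 * ID out of the leading WMI header and switches on these enumerators,
 * roughly:
 *
 *	struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 *	enum wmi_10_4_event_id id;
 *
 *	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 *	switch (id) {
 *	case WMI_10_4_READY_EVENTID:
 *		ath10k_wmi_event_ready(ar, skb);
 *		break;
 *	case WMI_10_4_MGMT_RX_EVENTID:
 *		ath10k_wmi_event_mgmt_rx(ar, skb);
 *		return;
 *	default:
 *		break;
 *	}
 */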
WMI_10_4_WOW_WAKEUP_HOST_EVENTID, + WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID, + WMI_10_4_GTK_REKEY_FAIL_EVENTID, + WMI_10_4_DCS_INTERFERENCE_EVENTID, + WMI_10_4_PDEV_TPC_CONFIG_EVENTID, + WMI_10_4_CSA_HANDLING_EVENTID, + WMI_10_4_GPIO_INPUT_EVENTID, + WMI_10_4_PEER_RATECODE_LIST_EVENTID, + WMI_10_4_GENERIC_BUFFER_EVENTID, + WMI_10_4_MCAST_BUF_RELEASE_EVENTID, + WMI_10_4_MCAST_LIST_AGEOUT_EVENTID, + WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID, + WMI_10_4_WDS_PEER_EVENTID, + WMI_10_4_PEER_STA_PS_STATECHG_EVENTID, + WMI_10_4_PDEV_FIPS_EVENTID, + WMI_10_4_TT_STATS_EVENTID, + WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID, + WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID, + WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID, + WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID, + WMI_10_4_PDEV_NFCAL_POWER_EVENTID, + WMI_10_4_PDEV_TPC_EVENTID, + WMI_10_4_PDEV_GET_AST_INFO_EVENTID, + WMI_10_4_PDEV_TEMPERATURE_EVENTID, + WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID, + WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID, + WMI_10_4_MU_REPORT_EVENTID, + WMI_10_4_TX_DATA_TRAFFIC_CTRL_EVENTID, + WMI_10_4_PEER_TX_MU_TXMIT_COUNT_EVENTID, + WMI_10_4_PEER_GID_USERPOS_LIST_EVENTID, + WMI_10_4_PDEV_CHECK_CAL_VERSION_EVENTID, + WMI_10_4_ATF_PEER_STATS_EVENTID, + WMI_10_4_PDEV_GET_RX_FILTER_EVENTID, + WMI_10_4_NAC_RSSI_EVENTID, + WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID, + WMI_10_4_GET_TSF_TIMER_RESP_EVENTID, + WMI_10_4_PDEV_TPC_TABLE_EVENTID, + WMI_10_4_PDEV_WDS_ENTRY_LIST_EVENTID, + WMI_10_4_TDLS_PEER_EVENTID, + WMI_10_4_HOST_SWFDA_EVENTID, + WMI_10_4_ESP_ESTIMATE_EVENTID, + WMI_10_4_DFS_STATUS_CHECK_EVENTID, + WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1, +}; + enum wmi_phy_mode { MODE_11A = 0, /* 11a Mode */ MODE_11G = 1, /* 11b/g Mode */ @@ -503,10 +1986,59 @@ enum wmi_phy_mode { MODE_11AC_VHT20_2G = 11, MODE_11AC_VHT40_2G = 12, MODE_11AC_VHT80_2G = 13, - MODE_UNKNOWN = 14, - MODE_MAX = 14 + MODE_11AC_VHT80_80 = 14, + MODE_11AC_VHT160 = 15, + MODE_UNKNOWN = 16, + MODE_MAX = 16 }; +static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode) +{ + switch (mode) { + case MODE_11A: + return "11a"; + case MODE_11G: + return "11g"; + case MODE_11B: + return "11b"; + case MODE_11GONLY: + return "11gonly"; + case MODE_11NA_HT20: + return "11na-ht20"; + case MODE_11NG_HT20: + return "11ng-ht20"; + case MODE_11NA_HT40: + return "11na-ht40"; + case MODE_11NG_HT40: + return "11ng-ht40"; + case MODE_11AC_VHT20: + return "11ac-vht20"; + case MODE_11AC_VHT40: + return "11ac-vht40"; + case MODE_11AC_VHT80: + return "11ac-vht80"; + case MODE_11AC_VHT160: + return "11ac-vht160"; + case MODE_11AC_VHT80_80: + return "11ac-vht80+80"; + case MODE_11AC_VHT20_2G: + return "11ac-vht20-2g"; + case MODE_11AC_VHT40_2G: + return "11ac-vht40-2g"; + case MODE_11AC_VHT80_2G: + return "11ac-vht80-2g"; + case MODE_UNKNOWN: + /* skip */ + break; + + /* no default handler to allow compiler to check that the + * enum is fully handled + */ + } + + return "<unknown>"; +} + #define WMI_CHAN_LIST_TAG 0x1 #define WMI_SSID_LIST_TAG 0x2 #define WMI_BSSID_LIST_TAG 0x3 @@ -525,6 +2057,7 @@ struct wmi_channel { union { __le32 reginfo0; struct { + /* note: power unit is 0.5 dBm */ u8 min_power; u8 max_power; u8 reg_power; @@ -534,7 +2067,10 @@ struct wmi_channel { union { __le32 reginfo1; struct { + /* note: power unit is 1 dBm */ u8 antenna_max; + /* note: power unit is 0.5 dBm */ + u8 max_tx_power; } __packed; } __packed; } __packed; @@ -542,15 +2078,18 @@ struct wmi_channel { struct wmi_channel_arg { u32 freq; u32 band_center_freq1; + u32 band_center_freq2; bool passive; bool allow_ibss; bool 
allow_ht; bool allow_vht; bool ht40plus; - /* note: power unit is 1/4th of dBm */ + bool chan_radar; + /* note: power unit is 0.5 dBm */ u32 min_power; u32 max_power; u32 max_reg_power; + /* note: power unit is 1 dBm */ u32 max_antenna_gain; u32 reg_class_id; enum wmi_phy_mode mode; @@ -571,8 +2110,9 @@ enum wmi_channel_change_cause { /* Indicate reason for channel switch */ #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13) - -#define WMI_MAX_SPATIAL_STREAM 3 +/* DFS required on channel for 2nd segment of VHT160 and VHT80+80*/ +#define WMI_CHAN_FLAG_DFS_CFREQ2 (1 << 15) +#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */ /* HT Capabilities*/ #define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */ @@ -587,6 +2127,8 @@ enum wmi_channel_change_cause { #define WMI_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */ #define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8 #define WMI_HT_CAP_HT40_SGI 0x0800 +#define WMI_HT_CAP_RX_LDPC 0x1000 /* LDPC RX support */ +#define WMI_HT_CAP_TX_LDPC 0x2000 /* LDPC TX support */ #define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \ WMI_HT_CAP_HT20_SGI | \ @@ -595,7 +2137,6 @@ enum wmi_channel_change_cause { WMI_HT_CAP_RX_STBC | \ WMI_HT_CAP_LDPC) - /* * WMI_VHT_CAP_* these maps to ieee 802.11ac vht capability information * field. The fields not defined here are not supported, or reserved. @@ -606,9 +2147,18 @@ enum wmi_channel_change_cause { #define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003 #define WMI_VHT_CAP_RX_LDPC 0x00000010 #define WMI_VHT_CAP_SGI_80MHZ 0x00000020 +#define WMI_VHT_CAP_SGI_160MHZ 0x00000040 #define WMI_VHT_CAP_TX_STBC 0x00000080 #define WMI_VHT_CAP_RX_STBC_MASK 0x00000300 #define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8 +#define WMI_VHT_CAP_SU_BFER 0x00000800 +#define WMI_VHT_CAP_SU_BFEE 0x00001000 +#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000 +#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13 +#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000 +#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16 +#define WMI_VHT_CAP_MU_BFER 0x00080000 +#define WMI_VHT_CAP_MU_BFEE 0x00100000 #define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000 #define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23 #define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000 @@ -657,6 +2207,8 @@ enum { REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5Ghz, VHT40 + channels */ REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5Ghz VHT40 - channels */ REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5Ghz, VHT80 channels */ + REGDMN_MODE_11AC_VHT160 = 0x200000, /* 5Ghz, VHT160 channels */ + REGDMN_MODE_11AC_VHT80_80 = 0x400000, /* 5Ghz, VHT80+80 channels */ REGDMN_MODE_ALL = 0xffffffff }; @@ -716,10 +2268,6 @@ struct wlan_host_mem_req { __le32 num_units; } __packed; -#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \ - ((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ - (1 << ((svc_id)%(sizeof(u32))))) != 0) - /* * The following struct holds optional payload for * wmi_service_ready_event,e.g., 11ac pass some of the @@ -733,7 +2281,7 @@ struct wmi_service_ready_event { __le32 phy_capability; /* Maximum number of frag table entries that SW will populate less 1 */ __le32 max_frag_entry; - __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; + __le32 wmi_service_bitmap[16]; __le32 num_rf_chains; /* * The following field is only valid for service type @@ -759,19 +2307,50 @@ struct wmi_service_ready_event { * where FW can access this memory directly (or) by DMA. 
*/ __le32 num_mem_reqs; - struct wlan_host_mem_req mem_reqs[1]; + struct wlan_host_mem_req mem_reqs[]; } __packed; -/* - * status consists of upper 16 bits fo int status and lower 16 bits of - * module ID that retuned status - */ -#define WLAN_INIT_STATUS_SUCCESS 0x0 -#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff) -#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff) +/* This is the definition from 10.X firmware branch */ +struct wmi_10x_service_ready_event { + __le32 sw_version; + __le32 abi_version; + + /* WMI_PHY_CAPABILITY */ + __le32 phy_capability; + + /* Maximum number of frag table entries that SW will populate less 1 */ + __le32 max_frag_entry; + __le32 wmi_service_bitmap[16]; + __le32 num_rf_chains; + + /* + * The following field is only valid for service type + * WMI_SERVICE_11AC + */ + __le32 ht_cap_info; /* WMI HT Capability */ + __le32 vht_cap_info; /* VHT capability info field of 802.11ac */ + __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */ + __le32 hw_min_tx_power; + __le32 hw_max_tx_power; + + struct hal_reg_capabilities hal_reg_capabilities; -#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ) -#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ) + __le32 sys_cap_info; + __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */ + + /* + * request to host to allocate a chunk of memory and pass it down to FW + * via WMI_INIT. FW uses this as FW extension memory for saving its + * data structures. Only valid for low latency interfaces like PCIE + * where FW can access this memory directly (or) by DMA. + */ + __le32 num_mem_reqs; + + struct wlan_host_mem_req mem_reqs[]; +} __packed; + +#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ) +#define WMI_UNIFIED_READY_TIMEOUT_HZ (5 * HZ) struct wmi_ready_event { __le32 sw_version; @@ -791,7 +2370,7 @@ struct wmi_resource_config { * In offload mode target supports features like WOW, chatter and * other protocol offloads. In order to support them some * functionalities like reorder buffering, PN checking need to be - * done in target. This determines maximum number of peers suported + * done in target. This determines maximum number of peers supported * by target in offload mode */ __le32 num_offload_peers; @@ -863,7 +2442,7 @@ struct wmi_resource_config { */ __le32 rx_decap_mode; - /* what is the maximum scan requests than can be queued */ + /* what is the maximum number of scan requests that can be queued */ __le32 scan_max_pending_reqs; /* maximum VDEV that could use BMISS offload */ @@ -972,12 +2551,548 @@ struct wmi_resource_config { * Max. number of Tx fragments per MSDU * This parameter controls the max number of Tx fragments per MSDU. * This is sent by the target as part of the WMI_SERVICE_READY event - * and is overriden by the OS shim as required. + * and is overridden by the OS shim as required. */ __le32 max_frag_entries; } __packed; -/* strucutre describing host memory chunk. */ +struct wmi_resource_config_10x { + /* number of virtual devices (VAPs) to support */ + __le32 num_vdevs; + + /* number of peer nodes to support */ + __le32 num_peers; + + /* number of keys per peer */ + __le32 num_peer_keys; + + /* total number of TX/RX data TIDs */ + __le32 num_tids; + + /* + * max skid for resolving hash collisions + * + * The address search table is sparse, so that if two MAC addresses + * result in the same hash value, the second of these conflicting + * entries can slide to the next index in the address search table, + * and use it, if it is unoccupied. This ast_skid_limit parameter + * specifies the upper bound on how many subsequent indices to search + * over to find an unoccupied space. + */ + __le32 ast_skid_limit; + + /* + * the nominal chain mask for transmit + * + * The chain mask may be modified dynamically, e.g. to operate AP + * tx with a reduced number of chains if no clients are associated. + * This configuration parameter specifies the nominal chain-mask that + * should be used when not operating with a reduced set of tx chains. + */ + __le32 tx_chain_mask; + + /* + * the nominal chain mask for receive + * + * The chain mask may be modified dynamically, e.g. for a client + * to use a reduced number of chains for receive if the traffic to + * the client is low enough that it doesn't require downlink MIMO + * or antenna diversity. + * This configuration parameter specifies the nominal chain-mask that + * should be used when not operating with a reduced set of rx chains. + */ + __le32 rx_chain_mask; + + /* + * what rx reorder timeout (ms) to use for the AC + * + * Each WMM access class (voice, video, best-effort, background) will + * have its own timeout value to dictate how long to wait for missing + * rx MPDUs to arrive before flushing subsequent MPDUs that have + * already been received. + * This parameter specifies the timeout in milliseconds for each + * class. + */ + __le32 rx_timeout_pri_vi; + __le32 rx_timeout_pri_vo; + __le32 rx_timeout_pri_be; + __le32 rx_timeout_pri_bk; + + /* + * what mode the rx should decap packets to + * + * MAC can decap to RAW (no decap), native wifi or Ethernet types. + * This setting also determines the default TX behavior, however TX + * behavior can be modified on a per VAP basis during VAP init + */ + __le32 rx_decap_mode; + + /* what is the maximum number of scan requests that can be queued */ + __le32 scan_max_pending_reqs; + + /* maximum VDEV that could use BMISS offload */ + __le32 bmiss_offload_max_vdev; + + /* maximum VDEV that could use offload roaming */ + __le32 roam_offload_max_vdev; + + /* maximum AP profiles that would push to offload roaming */ + __le32 roam_offload_max_ap_profiles; + + /* + * how many groups to use for mcast->ucast conversion + * + * The target's WAL maintains a table to hold information regarding + * which peers belong to a given multicast group, so that if + * multicast->unicast conversion is enabled, the target can convert + * multicast tx frames to a series of unicast tx frames, to each + * peer within the multicast group. + * This num_mcast_groups configuration parameter tells the target how + * many multicast groups to provide storage for within its multicast + * group membership table. + */ + __le32 num_mcast_groups; + + /* + * size to alloc for the mcast membership table + * + * This num_mcast_table_elems configuration parameter tells the + * target how many peer elements it needs to provide storage for in + * its multicast group membership table. + * These multicast group membership table elements are shared by the + * multicast groups stored within the table. + */ + __le32 num_mcast_table_elems; + + /* + * whether/how to do multicast->unicast conversion + * + * This configuration parameter specifies whether the target should + * perform multicast --> unicast conversion on transmit, and if so, + * what to do if it finds no entries in its multicast group + * membership table for the multicast IP address in the tx frame. + * Configuration value: + * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast + * address from the tx frame is found in the multicast group + * membership table. If the IP multicast address is not found, + * drop the frame. + * 2 -> Convert multicast frames to unicast, if the IP multicast + * address from the tx frame is found in the multicast group + * membership table. If the IP multicast address is not found, + * transmit the frame as multicast. + */ + __le32 mcast2ucast_mode; + + /* + * how much memory to allocate for a tx PPDU dbg log + * + * This parameter controls how much memory the target will allocate + * to store a log of tx PPDU meta-information (how large the PPDU + * was, when it was sent, whether it was successful, etc.) + */ + __le32 tx_dbg_log_size; + + /* how many AST entries to be allocated for WDS */ + __le32 num_wds_entries; + + /* + * MAC DMA burst size, e.g. for target PCI the limit can be + * 0 - default, 1 - 256B + */ + __le32 dma_burst_size; + + /* + * Fixed delimiters to be inserted after every MPDU to + * account for interface latency to avoid underrun. + */ + __le32 mac_aggr_delim; + + /* + * determine whether target is responsible for detecting duplicate + * non-aggregate MPDU and timing out stale fragments. + * + * A-MPDU reordering is always performed on the target. + * + * 0: target responsible for frag timeout and dup checking + * 1: host responsible for frag timeout and dup checking + */ + __le32 rx_skip_defrag_timeout_dup_detection_check; + + /* + * Configuration for VoW: + * No of Video Nodes to be supported + * and Max no of descriptors for each Video link (node). + */ + __le32 vow_config; + + /* Number of msdu descriptors target should use */ + __le32 num_msdu_desc; + + /* + * Max. number of Tx fragments per MSDU + * This parameter controls the max number of Tx fragments per MSDU. + * This is sent by the target as part of the WMI_SERVICE_READY event + * and is overridden by the OS shim as required. + */ + __le32 max_frag_entries; +} __packed; + +enum wmi_10_2_feature_mask { + WMI_10_2_RX_BATCH_MODE = BIT(0), + WMI_10_2_ATF_CONFIG = BIT(1), + WMI_10_2_COEX_GPIO = BIT(3), + WMI_10_2_BSS_CHAN_INFO = BIT(6), + WMI_10_2_PEER_STATS = BIT(7), +}; + +struct wmi_resource_config_10_2 { + struct wmi_resource_config_10x common; + __le32 max_peer_ext_stats; + __le32 smart_ant_cap; /* 0-disable, 1-enable */ + __le32 bk_min_free; + __le32 be_min_free; + __le32 vi_min_free; + __le32 vo_min_free; + __le32 feature_mask; +} __packed; + +#define NUM_UNITS_IS_NUM_VDEVS BIT(0) +#define NUM_UNITS_IS_NUM_PEERS BIT(1) +#define NUM_UNITS_IS_NUM_ACTIVE_PEERS BIT(2) + +struct wmi_resource_config_10_4 { + /* Number of virtual devices (VAPs) to support */ + __le32 num_vdevs; + + /* Number of peer nodes to support */ + __le32 num_peers; + + /* Number of active peer nodes to support */ + __le32 num_active_peers; + + /* In offload mode, target supports features like WOW, chatter and other + * protocol offloads. In order to support them some functionalities like + * reorder buffering, PN checking need to be done in target. + * This determines maximum number of peers supported by target in + * offload mode. + */ + __le32 num_offload_peers; + + /* Number of reorder buffers available for doing target based reorder + * Rx reorder buffering + */ + __le32 num_offload_reorder_buffs; + + /* Number of keys per peer */ + __le32 num_peer_keys; + + /* Total number of TX/RX data TIDs */ + __le32 num_tids; + + /* Max skid for resolving hash collisions.
+ * The address search table is sparse, so that if two MAC addresses + * result in the same hash value, the second of these conflicting + * entries can slide to the next index in the address search table, + * and use it, if it is unoccupied. This ast_skid_limit parameter + * specifies the upper bound on how many subsequent indices to search + * over to find an unoccupied space. + */ + __le32 ast_skid_limit; + + /* The nominal chain mask for transmit. + * The chain mask may be modified dynamically, e.g. to operate AP tx + * with a reduced number of chains if no clients are associated. + * This configuration parameter specifies the nominal chain-mask that + * should be used when not operating with a reduced set of tx chains. + */ + __le32 tx_chain_mask; + + /* The nominal chain mask for receive. + * The chain mask may be modified dynamically, e.g. for a client to use + * a reduced number of chains for receive if the traffic to the client + * is low enough that it doesn't require downlink MIMO or antenna + * diversity. This configuration parameter specifies the nominal + * chain-mask that should be used when not operating with a reduced + * set of rx chains. + */ + __le32 rx_chain_mask; + + /* What rx reorder timeout (ms) to use for the AC. + * Each WMM access class (voice, video, best-effort, background) will + * have its own timeout value to dictate how long to wait for missing + * rx MPDUs to arrive before flushing subsequent MPDUs that have already + * been received. This parameter specifies the timeout in milliseconds + * for each class. + */ + __le32 rx_timeout_pri[4]; + + /* What mode the rx should decap packets to. + * MAC can decap to RAW (no decap), native wifi or Ethernet types. + * This setting also determines the default TX behavior, however TX + * behavior can be modified on a per VAP basis during VAP init + */ + __le32 rx_decap_mode; + + __le32 scan_max_pending_req; + + __le32 bmiss_offload_max_vdev; + + __le32 roam_offload_max_vdev; + + __le32 roam_offload_max_ap_profiles; + + /* How many groups to use for mcast->ucast conversion. + * The target's WAL maintains a table to hold information regarding + * which peers belong to a given multicast group, so that if + * multicast->unicast conversion is enabled, the target can convert + * multicast tx frames to a series of unicast tx frames, to each peer + * within the multicast group. This num_mcast_groups configuration + * parameter tells the target how many multicast groups to provide + * storage for within its multicast group membership table. + */ + __le32 num_mcast_groups; + + /* Size to alloc for the mcast membership table. + * This num_mcast_table_elems configuration parameter tells the target + * how many peer elements it needs to provide storage for in its + * multicast group membership table. These multicast group membership + * table elements are shared by the multicast groups stored within + * the table. + */ + __le32 num_mcast_table_elems; + + /* Whether/how to do multicast->unicast conversion. + * This configuration parameter specifies whether the target should + * perform multicast --> unicast conversion on transmit, and if so, + * what to do if it finds no entries in its multicast group membership + * table for the multicast IP address in the tx frame. + * Configuration value: + * 0 -> Do not perform multicast to unicast conversion. + * 1 -> Convert multicast frames to unicast, if the IP multicast address + * from the tx frame is found in the multicast group membership + * table. 
If the IP multicast address is not found, drop the frame. + * 2 -> Convert multicast frames to unicast, if the IP multicast address + * from the tx frame is found in the multicast group membership + * table. If the IP multicast address is not found, transmit the + * frame as multicast. + */ + __le32 mcast2ucast_mode; + + /* How much memory to allocate for a tx PPDU dbg log. + * This parameter controls how much memory the target will allocate to + * store a log of tx PPDU meta-information (how large the PPDU was, + * when it was sent, whether it was successful, etc.) + */ + __le32 tx_dbg_log_size; + + /* How many AST entries to be allocated for WDS */ + __le32 num_wds_entries; + + /* MAC DMA burst size. 0 -default, 1 -256B */ + __le32 dma_burst_size; + + /* Fixed delimiters to be inserted after every MPDU to account for + * interface latency to avoid underrun. + */ + __le32 mac_aggr_delim; + + /* Determine whether target is responsible for detecting duplicate + * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering + * is always performed on the target. + * + * 0: target responsible for frag timeout and dup checking + * 1: host responsible for frag timeout and dup checking + */ + __le32 rx_skip_defrag_timeout_dup_detection_check; + + /* Configuration for VoW: No of Video nodes to be supported and max + * no of descriptors for each video link (node). + */ + __le32 vow_config; + + /* Maximum vdev that could use gtk offload */ + __le32 gtk_offload_max_vdev; + + /* Number of msdu descriptors target should use */ + __le32 num_msdu_desc; + + /* Max number of tx fragments per MSDU. + * This parameter controls the max number of tx fragments per MSDU. + * This will be passed by the target as part of the WMI_SERVICE_READY event + * and is overridden by the OS shim as required. + */ + __le32 max_frag_entries; + + /* Max number of extended peer stats. + * This parameter controls the max number of peers for which extended + * statistics are supported by target + */ + __le32 max_peer_ext_stats; + + /* Smart antenna capabilities information. + * 1 - Smart antenna is enabled + * 0 - Smart antenna is disabled + * In future this can contain smart antenna specific capabilities. + */ + __le32 smart_ant_cap; + + /* User can configure the buffers allocated for each AC (BE, BK, VI, VO) + * during init. + */ + __le32 bk_minfree; + __le32 be_minfree; + __le32 vi_minfree; + __le32 vo_minfree; + + /* Rx batch mode capability. + * 1 - Rx batch mode enabled + * 0 - Rx batch mode disabled + */ + __le32 rx_batchmode; + + /* Thermal throttling capability. + * 1 - Capable of thermal throttling + * 0 - Not capable of thermal throttling + */ + __le32 tt_support; + + /* ATF configuration. + * 1 - Enable ATF + * 0 - Disable ATF + */ + __le32 atf_config; + + /* Configure padding to manage IP header un-alignment + * 1 - Enable padding + * 0 - Disable padding + */ + __le32 iphdr_pad_config; + + /* qwrap configuration (bits 15-0) + * 1 - This is qwrap configuration + * 0 - This is not qwrap + * + * Bits 31-16 is alloc_frag_desc_for_data_pkt (1 enables, 0 disables) + * In order to get ack-RSSI reporting and to specify the tx-rate for + * individual frames, this option must be enabled. This uses an extra + * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it.
+ */ + __le32 qwrap_config; +} __packed; + +enum wmi_coex_version { + WMI_NO_COEX_VERSION_SUPPORT = 0, + /* 3 wire coex support*/ + WMI_COEX_VERSION_1 = 1, + /* 2.5 wire coex support*/ + WMI_COEX_VERSION_2 = 2, + /* 2.5 wire coex with duty cycle support */ + WMI_COEX_VERSION_3 = 3, + /* 4 wire coex support*/ + WMI_COEX_VERSION_4 = 4, +}; + +/** + * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags + * @WMI_10_4_LTEU_SUPPORT: LTEU config + * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config + * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan + * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan + * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats + * @WMI_10_4_PEER_STATS: Per station stats + * @WMI_10_4_VDEV_STATS: Per vdev stats + * @WMI_10_4_TDLS: Implicit TDLS support in firmware enable/disable + * @WMI_10_4_TDLS_OFFCHAN: TDLS offchannel support enable/disable + * @WMI_10_4_TDLS_UAPSD_BUFFER_STA: TDLS buffer sta support enable/disable + * @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable + * @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host + * enable/disable + * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable + * @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable + * @WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT: Firmware supports Extended Peer + * TID configuration for QoS related settings + * @WMI_10_4_REPORT_AIRTIME: Firmware supports transmit airtime reporting + */ +enum wmi_10_4_feature_mask { + WMI_10_4_LTEU_SUPPORT = BIT(0), + WMI_10_4_COEX_GPIO_SUPPORT = BIT(1), + WMI_10_4_AUX_RADIO_SPECTRAL_INTF = BIT(2), + WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3), + WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4), + WMI_10_4_PEER_STATS = BIT(5), + WMI_10_4_VDEV_STATS = BIT(6), + WMI_10_4_TDLS = BIT(7), + WMI_10_4_TDLS_OFFCHAN = BIT(8), + WMI_10_4_TDLS_UAPSD_BUFFER_STA = BIT(9), + WMI_10_4_TDLS_UAPSD_SLEEP_STA = BIT(10), + WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11), + WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12), + WMI_10_4_TX_DATA_ACK_RSSI = BIT(16), + WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT = BIT(17), + WMI_10_4_REPORT_AIRTIME = BIT(18), + +}; + +/* WMI_GPIO_CONFIG_CMDID */ +enum { + WMI_GPIO_PULL_NONE, + WMI_GPIO_PULL_UP, + WMI_GPIO_PULL_DOWN, +}; + +enum { + WMI_GPIO_INTTYPE_DISABLE, + WMI_GPIO_INTTYPE_RISING_EDGE, + WMI_GPIO_INTTYPE_FALLING_EDGE, + WMI_GPIO_INTTYPE_BOTH_EDGE, + WMI_GPIO_INTTYPE_LEVEL_LOW, + WMI_GPIO_INTTYPE_LEVEL_HIGH +}; + +/* WMI_GPIO_CONFIG_CMDID */ +struct wmi_gpio_config_cmd { + __le32 gpio_num; /* GPIO number to be setup */ + __le32 input; /* 0 - Output/ 1 - Input */ + __le32 pull_type; /* Pull type defined above */ + __le32 intr_mode; /* Interrupt mode defined above (Input) */ +} __packed; + +/* WMI_GPIO_OUTPUT_CMDID */ +struct wmi_gpio_output_cmd { + __le32 gpio_num; /* GPIO number to be setup */ + __le32 set; /* Set the GPIO pin*/ +} __packed; + +/* WMI_GPIO_INPUT_EVENTID */ +struct wmi_gpio_input_event { + __le32 gpio_num; /* GPIO number which changed state */ +} __packed; + +struct wmi_ext_resource_config_10_4_cmd { + /* contains enum wmi_host_platform_type */ + __le32 host_platform_config; + /* see enum wmi_10_4_feature_mask */ + __le32 fw_feature_bitmap; + /* WLAN priority GPIO number */ + __le32 wlan_gpio_priority; + /* see enum wmi_coex_version */ + __le32 coex_version; + /* COEX GPIO config */ + __le32 coex_gpio_pin1; + __le32 coex_gpio_pin2; + __le32 coex_gpio_pin3; + /* number of vdevs allowed to perform tdls */ 
+ __le32 num_tdls_vdevs; + /* number of peers to track per TDLS vdev */ + __le32 num_tdls_conn_table_entries; + /* number of tdls sleep sta supported */ + __le32 max_tdls_concurrent_sleep_sta; + /* number of tdls buffer sta supported */ + __le32 max_tdls_concurrent_buffer_sta; +}; + +/* structure describing host memory chunk. */ struct host_memory_chunk { /* id of the request that is passed up in service ready */ __le32 req_id; @@ -987,34 +3102,61 @@ struct host_memory_chunk { __le32 size; } __packed; +#define WMI_IRAM_RECOVERY_HOST_MEM_REQ_ID 8 + +struct wmi_host_mem_chunks { + __le32 count; + /* some fw revisions require at least 1 chunk regardless of count */ + union { + struct host_memory_chunk item; + DECLARE_FLEX_ARRAY(struct host_memory_chunk, items); + }; +} __packed; + struct wmi_init_cmd { struct wmi_resource_config resource_config; - __le32 num_host_mem_chunks; + struct wmi_host_mem_chunks mem_chunks; +} __packed; - /* - * variable number of host memory chunks. - * This should be the last element in the structure - */ - struct host_memory_chunk host_mem_chunks[1]; +/* _10x structure is from 10.X FW API */ +struct wmi_init_cmd_10x { + struct wmi_resource_config_10x resource_config; + struct wmi_host_mem_chunks mem_chunks; +} __packed; + +struct wmi_init_cmd_10_2 { + struct wmi_resource_config_10_2 resource_config; + struct wmi_host_mem_chunks mem_chunks; +} __packed; + +struct wmi_init_cmd_10_4 { + struct wmi_resource_config_10_4 resource_config; + struct wmi_host_mem_chunks mem_chunks; +} __packed; + +struct wmi_chan_list_entry { + __le16 freq; + u8 phy_mode; /* valid for 10.2 only */ + u8 reserved; } __packed; /* TLV for channel list */ struct wmi_chan_list { __le32 tag; /* WMI_CHAN_LIST_TAG */ __le32 num_chan; - __le32 channel_list[0]; + struct wmi_chan_list_entry channel_list[]; } __packed; struct wmi_bssid_list { __le32 tag; /* WMI_BSSID_LIST_TAG */ __le32 num_bssid; - struct wmi_mac_addr bssid_list[0]; + struct wmi_mac_addr bssid_list[]; } __packed; struct wmi_ie_data { __le32 tag; /* WMI_IE_TAG */ __le32 ie_len; - u8 ie_data[0]; + u8 ie_data[]; } __packed; struct wmi_ssid { @@ -1025,7 +3167,7 @@ struct wmi_ssid { struct wmi_ssid_list { __le32 tag; /* WMI_SSID_LIST_TAG */ __le32 num_ssids; - struct wmi_ssid ssids[0]; + struct wmi_ssid ssids[]; } __packed; /* prefix used by scan requestor ids on the host */ @@ -1039,6 +3181,11 @@ struct wmi_ssid_list { #define WLAN_SCAN_PARAMS_MAX_BSSID 4 #define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 +/* Values lower than this may be refused by some firmware revisions with a scan + * completion with a timedout reason. + */ +#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40 + /* Scan priority numbers must be sequential, starting with 0 */ enum wmi_scan_priority { WMI_SCAN_PRIORITY_VERY_LOW = 0, @@ -1049,7 +3196,7 @@ enum wmi_scan_priority { WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */ }; -struct wmi_start_scan_cmd { +struct wmi_start_scan_common { /* Scan ID */ __le32 scan_id; /* Scan requestor ID */ @@ -1065,7 +3212,7 @@ struct wmi_start_scan_cmd { /* dwell time in msec on passive channels */ __le32 dwell_time_passive; /* - * min time in msec on the BSS channel,only valid if atleast one + * min time in msec on the BSS channel, only valid if at least one * VDEV is active */ __le32 min_rest_time; @@ -1091,7 +3238,7 @@ struct wmi_start_scan_cmd { * and bssid_list */ __le32 repeat_probe_time; - /* time in msec between 2 consequetive probe requests with in a set. */ + /* time in msec between 2 consecutive probe requests within a set.
*/ __le32 probe_spacing_time; /* * data inactivity time in msec on bss channel that will be used by @@ -1107,14 +3254,18 @@ struct wmi_start_scan_cmd { __le32 probe_delay; /* Scan control flags */ __le32 scan_ctrl_flags; +} __packed; - /* Burst duration time in msecs */ - __le32 burst_duration; - /* - * TLV (tag length value ) paramerters follow the scan_cmd structure. - * TLV can contain channel list, bssid list, ssid list and - * ie. the TLV tags are defined above; - */ +struct wmi_start_scan_cmd { + struct wmi_start_scan_common common; + __le32 burst_duration_ms; + u8 tlvs[]; +} __packed; + +/* This is the definition from 10.X firmware branch */ +struct wmi_10x_start_scan_cmd { + struct wmi_start_scan_common common; + u8 tlvs[]; } __packed; struct wmi_ssid_arg { @@ -1142,6 +3293,7 @@ struct wmi_start_scan_arg { u32 max_scan_time; u32 probe_delay; u32 scan_ctrl_flags; + u32 burst_duration_ms; u32 ie_len; u32 n_channels; @@ -1149,9 +3301,11 @@ struct wmi_start_scan_arg { u32 n_bssids; u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN]; - u32 channels[64]; + u16 channels[64]; struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID]; struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID]; + struct wmi_mac_addr mac_addr; + struct wmi_mac_addr mac_mask; }; /* scan control flags */ @@ -1171,13 +3325,19 @@ struct wmi_start_scan_arg { /* When set, DFS channels will not be scanned */ #define WMI_SCAN_BYPASS_DFS_CHN 0x40 /* Different FW scan engine may choose to bail out on errors. - * Allow the driver to have influence over that. */ + * Allow the driver to have influence over that. + */ #define WMI_SCAN_CONTINUE_ON_ERROR 0x80 +/* Use random MAC address for TA for Probe Request frame and add + * OUI specified by WMI_SCAN_PROB_REQ_OUI_CMDID to the Probe Request frame. + * if OUI is not set by WMI_SCAN_PROB_REQ_OUI_CMDID then the flag is ignored. 
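+ * For example (illustrative host-side use, not part of this header),
+ * a host requesting randomized probe requests might set:
+ *
+ *	arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
+ *
+ * where 'arg' is a struct wmi_start_scan_arg whose mac_addr/mac_mask
+ * fields presumably supply the randomized address.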
+ */ +#define WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ 0x1000 + /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */ #define WMI_SCAN_CLASS_MASK 0xFF000000 - enum wmi_stop_scan_type { WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */ WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */ @@ -1202,7 +3362,7 @@ struct wmi_stop_scan_arg { struct wmi_scan_chan_list_cmd { __le32 num_scan_chans; - struct wmi_channel chan_info[0]; + struct wmi_channel chan_info[]; } __packed; struct wmi_scan_chan_list_arg { @@ -1222,15 +3382,17 @@ enum wmi_bss_filter { }; enum wmi_scan_event_type { - WMI_SCAN_EVENT_STARTED = 0x1, - WMI_SCAN_EVENT_COMPLETED = 0x2, - WMI_SCAN_EVENT_BSS_CHANNEL = 0x4, - WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8, - WMI_SCAN_EVENT_DEQUEUED = 0x10, - WMI_SCAN_EVENT_PREEMPTED = 0x20, /* possibly by high-prio scan */ - WMI_SCAN_EVENT_START_FAILED = 0x40, - WMI_SCAN_EVENT_RESTARTED = 0x80, - WMI_SCAN_EVENT_MAX = 0x8000 + WMI_SCAN_EVENT_STARTED = BIT(0), + WMI_SCAN_EVENT_COMPLETED = BIT(1), + WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2), + WMI_SCAN_EVENT_FOREIGN_CHANNEL = BIT(3), + WMI_SCAN_EVENT_DEQUEUED = BIT(4), + /* possibly by high-prio scan */ + WMI_SCAN_EVENT_PREEMPTED = BIT(5), + WMI_SCAN_EVENT_START_FAILED = BIT(6), + WMI_SCAN_EVENT_RESTARTED = BIT(7), + WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8), + WMI_SCAN_EVENT_MAX = BIT(15), }; enum wmi_scan_completion_reason { @@ -1238,6 +3400,7 @@ enum wmi_scan_completion_reason { WMI_SCAN_REASON_CANCELLED, WMI_SCAN_REASON_PREEMPTED, WMI_SCAN_REASON_TIMEDOUT, + WMI_SCAN_REASON_INTERNAL_FAILURE, WMI_SCAN_REASON_MAX, }; @@ -1267,7 +3430,7 @@ struct wmi_scan_event { * good idea to pass all the fields in the RX status * descriptor up to the host. */ -struct wmi_mgmt_rx_hdr { +struct wmi_mgmt_rx_hdr_v1 { __le32 channel; __le32 snr; __le32 rate; @@ -1276,108 +3439,202 @@ struct wmi_mgmt_rx_hdr { __le32 status; /* %WMI_RX_STATUS_ */ } __packed; -struct wmi_mgmt_rx_event { - struct wmi_mgmt_rx_hdr hdr; - u8 buf[0]; +struct wmi_mgmt_rx_hdr_v2 { + struct wmi_mgmt_rx_hdr_v1 v1; + __le32 rssi_ctl[4]; +} __packed; + +struct wmi_mgmt_rx_event_v1 { + struct wmi_mgmt_rx_hdr_v1 hdr; + u8 buf[]; } __packed; +struct wmi_mgmt_rx_event_v2 { + struct wmi_mgmt_rx_hdr_v2 hdr; + u8 buf[]; +} __packed; + +struct wmi_10_4_mgmt_rx_hdr { + __le32 channel; + __le32 snr; + u8 rssi_ctl[4]; + __le32 rate; + __le32 phy_mode; + __le32 buf_len; + __le32 status; +} __packed; + +struct wmi_10_4_mgmt_rx_event { + struct wmi_10_4_mgmt_rx_hdr hdr; + u8 buf[]; +} __packed; + +struct wmi_mgmt_rx_ext_info { + __le64 rx_mac_timestamp; +} __packed __aligned(4); + #define WMI_RX_STATUS_OK 0x00 #define WMI_RX_STATUS_ERR_CRC 0x01 #define WMI_RX_STATUS_ERR_DECRYPT 0x08 #define WMI_RX_STATUS_ERR_MIC 0x10 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20 +/* Extension data at the end of mgmt frame */ +#define WMI_RX_STATUS_EXT_INFO 0x40 -struct wmi_single_phyerr_rx_hdr { - /* TSF timestamp */ - __le32 tsf_timestamp; +#define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26 +#define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24 +#define PHY_ERROR_GEN_RADAR 0x05 - /* - * Current freq1, freq2 - * - * [7:0]: freq1[lo] - * [15:8] : freq1[hi] - * [23:16]: freq2[lo] - * [31:24]: freq2[hi] - */ +#define PHY_ERROR_10_4_RADAR_MASK 0x4 +#define PHY_ERROR_10_4_SPECTRAL_SCAN_MASK 0x4000000 + +enum phy_err_type { + PHY_ERROR_UNKNOWN, + PHY_ERROR_SPECTRAL_SCAN, + PHY_ERROR_FALSE_RADAR_EXT, + PHY_ERROR_RADAR +}; + +struct wmi_phyerr { + __le32 tsf_timestamp; __le16 freq1; __le16 freq2; + u8 rssi_combined; + u8 
chan_width_mhz; + u8 phy_err_code; + u8 rsvd0; + __le32 rssi_chains[4]; + __le16 nf_chains[4]; + __le32 buf_len; + u8 buf[]; +} __packed; - /* - * Combined RSSI over all chains and channel width for this PHY error - * - * [7:0]: RSSI combined - * [15:8]: Channel width (MHz) - * [23:16]: PHY error code - * [24:16]: reserved (future use) - */ +struct wmi_phyerr_event { + __le32 num_phyerrs; + __le32 tsf_l32; + __le32 tsf_u32; + + /* array of struct wmi_phyerr */ + u8 phyerrs[]; +} __packed; + +struct wmi_10_4_phyerr_event { + __le32 tsf_l32; + __le32 tsf_u32; + __le16 freq1; + __le16 freq2; u8 rssi_combined; u8 chan_width_mhz; u8 phy_err_code; u8 rsvd0; + __le32 rssi_chains[4]; + __le16 nf_chains[4]; + __le32 phy_err_mask[2]; + __le32 tsf_timestamp; + __le32 buf_len; + u8 buf[]; +} __packed; - /* - * RSSI on chain 0 through 3 - * - * This is formatted the same as the PPDU_START RX descriptor - * field: - * - * [7:0]: pri20 - * [15:8]: sec20 - * [23:16]: sec40 - * [31:24]: sec80 - */ +struct wmi_radar_found_info { + __le32 pri_min; + __le32 pri_max; + __le32 width_min; + __le32 width_max; + __le32 sidx_min; + __le32 sidx_max; +} __packed; - __le32 rssi_chain0; - __le32 rssi_chain1; - __le32 rssi_chain2; - __le32 rssi_chain3; +enum wmi_radar_confirmation_status { + /* Detected radar was due to SW pulses */ + WMI_SW_RADAR_DETECTED = 0, - /* - * Last calibrated NF value for chain 0 through 3 - * - * nf_list_1: - * - * + [15:0] - chain 0 - * + [31:16] - chain 1 - * - * nf_list_2: - * - * + [15:0] - chain 2 - * + [31:16] - chain 3 - */ - __le32 nf_list_1; - __le32 nf_list_2; + WMI_RADAR_DETECTION_FAIL = 1, + /* Real radar detected */ + WMI_HW_RADAR_DETECTED = 2, +}; - /* Length of the frame */ - __le32 buf_len; -} __packed; +#define PHYERR_TLV_SIG 0xBB +#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB +#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8 +#define PHYERR_TLV_TAG_SPECTRAL_SUMMARY_REPORT 0xF9 -struct wmi_single_phyerr_rx_event { - /* Phy error event header */ - struct wmi_single_phyerr_rx_hdr hdr; - /* frame buffer */ - u8 bufp[0]; +struct phyerr_radar_report { + __le32 reg0; /* RADAR_REPORT_REG0_* */ + __le32 reg1; /* RADAR_REPORT_REG1_* */ } __packed; -struct wmi_comb_phyerr_rx_hdr { - /* Phy error phy error count */ - __le32 num_phyerr_events; - __le32 tsf_l32; - __le32 tsf_u32; +#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK 0x80000000 +#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB 31 + +#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_MASK 0x40000000 +#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_LSB 30 + +#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_MASK 0x3FF00000 +#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_LSB 20 + +#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_MASK 0x000F0000 +#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_LSB 16 + +#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_MASK 0x0000FC00 +#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_LSB 10 + +#define RADAR_REPORT_REG0_PULSE_SIDX_MASK 0x000003FF +#define RADAR_REPORT_REG0_PULSE_SIDX_LSB 0 + +#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_MASK 0x80000000 +#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_LSB 31 + +#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_MASK 0x7F000000 +#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_LSB 24 + +#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_MASK 0x00FF0000 +#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_LSB 16 + +#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_MASK 0x0000FF00 +#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_LSB 8 + +#define RADAR_REPORT_REG1_PULSE_DUR_MASK 0x000000FF +#define RADAR_REPORT_REG1_PULSE_DUR_LSB 0 + +struct 
phyerr_fft_report { + __le32 reg0; /* SEARCH_FFT_REPORT_REG0_ * */ + __le32 reg1; /* SEARCH_FFT_REPORT_REG1_ * */ } __packed; -struct wmi_comb_phyerr_rx_event { - /* Phy error phy error count */ - struct wmi_comb_phyerr_rx_hdr hdr; - /* - * frame buffer - contains multiple payloads in the order: - * header - payload, header - payload... - * (The header is of type: wmi_single_phyerr_rx_hdr) - */ - u8 bufp[0]; +#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_MASK 0xFF800000 +#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_LSB 23 + +#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_MASK 0x007FC000 +#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_LSB 14 + +#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_MASK 0x00003000 +#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_LSB 12 + +#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK 0x00000FFF +#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB 0 + +#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_MASK 0xFC000000 +#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_LSB 26 + +#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_MASK 0x03FC0000 +#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_LSB 18 + +#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK 0x0003FF00 +#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB 8 + +#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK 0x000000FF +#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB 0 + +struct phyerr_tlv { + __le16 len; + u8 tag; + u8 sig; } __packed; +#define DFS_RSSI_POSSIBLY_FALSE 50 +#define DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE 40 + struct wmi_mgmt_tx_hdr { __le32 vdev_id; struct wmi_mac_addr peer_macaddr; @@ -1388,7 +3645,7 @@ struct wmi_mgmt_tx_hdr { struct wmi_mgmt_tx_cmd { struct wmi_mgmt_tx_hdr hdr; - u8 buf[0]; + u8 buf[]; } __packed; struct wmi_echo_event { @@ -1399,7 +3656,6 @@ struct wmi_echo_cmd { __le32 value; } __packed; - struct wmi_pdev_set_regdomain_cmd { __le32 reg_domain; __le32 reg_domain_2G; @@ -1408,6 +3664,31 @@ struct wmi_pdev_set_regdomain_cmd { __le32 conformance_test_limit_5G; } __packed; +enum wmi_dfs_region { + /* Uninitialized dfs domain */ + WMI_UNINIT_DFS_DOMAIN = 0, + + /* FCC3 dfs domain */ + WMI_FCC_DFS_DOMAIN = 1, + + /* ETSI dfs domain */ + WMI_ETSI_DFS_DOMAIN = 2, + + /*Japan dfs domain */ + WMI_MKK4_DFS_DOMAIN = 3, +}; + +struct wmi_pdev_set_regdomain_cmd_10x { + __le32 reg_domain; + __le32 reg_domain_2G; + __le32 reg_domain_5G; + __le32 conformance_test_limit_2G; + __le32 conformance_test_limit_5G; + + /* dfs domain from wmi_dfs_region */ + __le32 dfs_domain; +} __packed; + /* Command to set/unset chip in quiet mode */ struct wmi_pdev_set_quiet_cmd { /* period in TUs */ @@ -1423,7 +3704,6 @@ struct wmi_pdev_set_quiet_cmd { __le32 enabled; } __packed; - /* * 802.11g protection mode. 
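+ * (see enum ath10k_protmode below: none / CTS-to-self / RTS-CTS)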
*/ @@ -1433,6 +3713,19 @@ enum ath10k_protmode { ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */ }; +enum wmi_rtscts_profile { + WMI_RTSCTS_FOR_NO_RATESERIES = 0, + WMI_RTSCTS_FOR_SECOND_RATESERIES, + WMI_RTSCTS_ACROSS_SW_RETRIES +}; + +#define WMI_RTSCTS_ENABLED 1 +#define WMI_RTSCTS_SET_MASK 0x0f +#define WMI_RTSCTS_SET_LSB 0 + +#define WMI_RTSCTS_PROFILE_MASK 0xf0 +#define WMI_RTSCTS_PROFILE_LSB 4 + enum wmi_beacon_gen_mode { WMI_BEACON_STAGGERED_MODE = 0, WMI_BEACON_BURST_MODE = 1 @@ -1464,10 +3757,110 @@ struct wmi_csa_event { #define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500 #define PEER_DEFAULT_STATS_UPDATE_PERIOD 500 +struct wmi_pdev_param_map { + u32 tx_chain_mask; + u32 rx_chain_mask; + u32 txpower_limit2g; + u32 txpower_limit5g; + u32 txpower_scale; + u32 beacon_gen_mode; + u32 beacon_tx_mode; + u32 resmgr_offchan_mode; + u32 protection_mode; + u32 dynamic_bw; + u32 non_agg_sw_retry_th; + u32 agg_sw_retry_th; + u32 sta_kickout_th; + u32 ac_aggrsize_scaling; + u32 ltr_enable; + u32 ltr_ac_latency_be; + u32 ltr_ac_latency_bk; + u32 ltr_ac_latency_vi; + u32 ltr_ac_latency_vo; + u32 ltr_ac_latency_timeout; + u32 ltr_sleep_override; + u32 ltr_rx_override; + u32 ltr_tx_activity_timeout; + u32 l1ss_enable; + u32 dsleep_enable; + u32 pcielp_txbuf_flush; + u32 pcielp_txbuf_watermark; + u32 pcielp_txbuf_tmo_en; + u32 pcielp_txbuf_tmo_value; + u32 pdev_stats_update_period; + u32 vdev_stats_update_period; + u32 peer_stats_update_period; + u32 bcnflt_stats_update_period; + u32 pmf_qos; + u32 arp_ac_override; + u32 dcs; + u32 ani_enable; + u32 ani_poll_period; + u32 ani_listen_period; + u32 ani_ofdm_level; + u32 ani_cck_level; + u32 dyntxchain; + u32 proxy_sta; + u32 idle_ps_config; + u32 power_gating_sleep; + u32 fast_channel_reset; + u32 burst_dur; + u32 burst_enable; + u32 cal_period; + u32 aggr_burst; + u32 rx_decap_mode; + u32 smart_antenna_default_antenna; + u32 igmpmld_override; + u32 igmpmld_tid; + u32 antenna_gain; + u32 rx_filter; + u32 set_mcast_to_ucast_tid; + u32 proxy_sta_mode; + u32 set_mcast2ucast_mode; + u32 set_mcast2ucast_buffer; + u32 remove_mcast2ucast_buffer; + u32 peer_sta_ps_statechg_enable; + u32 igmpmld_ac_override; + u32 block_interbss; + u32 set_disable_reset_cmdid; + u32 set_msdu_ttl_cmdid; + u32 set_ppdu_duration_cmdid; + u32 txbf_sound_period_cmdid; + u32 set_promisc_mode_cmdid; + u32 set_burst_mode_cmdid; + u32 en_stats; + u32 mu_group_policy; + u32 noise_detection; + u32 noise_threshold; + u32 dpd_enable; + u32 set_mcast_bcast_echo; + u32 atf_strict_sch; + u32 atf_sched_duration; + u32 ant_plzn; + u32 mgmt_retry_limit; + u32 sensitivity_level; + u32 signed_txpower_2g; + u32 signed_txpower_5g; + u32 enable_per_tid_amsdu; + u32 enable_per_tid_ampdu; + u32 cca_threshold; + u32 rts_fixed_rate; + u32 pdev_reset; + u32 wapi_mbssid_offset; + u32 arp_srcaddr; + u32 arp_dstaddr; + u32 enable_btcoex; + u32 rfkill_config; + u32 rfkill_enable; + u32 peer_stats_info_enable; +}; + +#define WMI_PDEV_PARAM_UNSUPPORTED 0 + enum wmi_pdev_param { - /* TX chian mask */ + /* TX chain mask */ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1, - /* RX chian mask */ + /* RX chain mask */ WMI_PDEV_PARAM_RX_CHAIN_MASK, /* TX power limit for 2G Radio */ WMI_PDEV_PARAM_TXPOWER_LIMIT2G, @@ -1481,7 +3874,7 @@ enum wmi_pdev_param { WMI_PDEV_PARAM_BEACON_TX_MODE, /* * Resource manager off chan mode . - * 0: turn off off chan mode. 1: turn on offchan mode + * 0: turn off offchan mode. 
1: turn on offchan mode
 	 */
 	WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
 	/*
@@ -1489,11 +3882,16 @@ enum wmi_pdev_param {
 	 * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
 	 */
 	WMI_PDEV_PARAM_PROTECTION_MODE,
-	/* Dynamic bandwidth 0: disable 1: enable */
+	/*
+	 * Dynamic bandwidth - 0: disable, 1: enable
+	 *
+	 * When enabled HW rate control tries different bandwidths when
+	 * retransmitting frames.
+	 */
 	WMI_PDEV_PARAM_DYNAMIC_BW,
-	/* Non aggregrate/ 11g sw retry threshold.0-disable */
+	/* Non aggregate/ 11g sw retry threshold. 0-disable */
 	WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
-	/* aggregrate sw retry threshold. 0-disable*/
+	/* aggregate sw retry threshold. 0-disable */
 	WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
 	/* Station kickout threshold (non of consecutive failures).0-disable */
 	WMI_PDEV_PARAM_STA_KICKOUT_TH,
@@ -1563,18 +3961,251 @@ enum wmi_pdev_param {
 	WMI_PDEV_PARAM_POWER_GATING_SLEEP,
 };
+enum wmi_10x_pdev_param {
+	/* TX chain mask */
+	WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+	/* RX chain mask */
+	WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+	/* TX power limit for 2G Radio */
+	WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+	/* TX power limit for 5G Radio */
+	WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+	/* TX power scale */
+	WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+	/* Beacon generation mode. 0: host, 1: target */
+	WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+	/* Beacon generation mode. 0: staggered 1: bursted */
+	WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+	/*
+	 * Resource manager off chan mode.
+	 * 0: turn off offchan mode. 1: turn on offchan mode
+	 */
+	WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	/*
+	 * Protection mode:
+	 * 0: no protection 1: use CTS-to-self 2: use RTS/CTS
+	 */
+	WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+	/* Dynamic bandwidth 0: disable 1: enable */
+	WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+	/* Non aggregate/ 11g sw retry threshold. 0-disable */
+	WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	/* aggregate sw retry threshold. 0-disable */
+	WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+	/* Station kickout threshold (number of consecutive failures). 0-disable */
+	WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+	/* Aggregate size scaling configuration per AC */
+	WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	/* LTR enable */
+	WMI_10X_PDEV_PARAM_LTR_ENABLE,
+	/* LTR latency for BE, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	/* LTR latency for BK, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	/* LTR latency for VI, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	/* LTR latency for VO, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	/* LTR AC latency timeout, in ms */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	/* LTR platform latency override, in us */
+	WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	/* LTR-RX override, in us */
+	WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+	/* Tx activity timeout for LTR, in us */
+	WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	/* L1SS state machine enable */
+	WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+	/* Deep sleep state machine enable */
+	WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+	/* pdev level stats update period in ms */
+	WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	/* vdev level stats update period in ms */
+	WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	/* peer level stats update period in ms */
+	WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	/* beacon filter status update period */
+	WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	/* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+	WMI_10X_PDEV_PARAM_PMF_QOS,
+	/* Access category on which ARP and DHCP frames are sent */
+	WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+	/* DCS configuration */
+	WMI_10X_PDEV_PARAM_DCS,
+	/* Enable/Disable ANI on target */
+	WMI_10X_PDEV_PARAM_ANI_ENABLE,
+	/* configure the ANI polling period */
+	WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+	/* configure the ANI listening period */
+	WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	/* configure OFDM immunity level */
+	WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+	/* configure CCK immunity level */
+	WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+	/* Enable/Disable CDD for 1x1 STAs in rate control module */
+	WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+	/* Enable/Disable Fast channel reset */
+	WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+	/* Set Bursting DUR */
+	WMI_10X_PDEV_PARAM_BURST_DUR,
+	/* Set Bursting Enable */
+	WMI_10X_PDEV_PARAM_BURST_ENABLE,
+
+	/* following are available as of firmware 10.2 */
+	WMI_10X_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+	WMI_10X_PDEV_PARAM_IGMPMLD_OVERRIDE,
+	WMI_10X_PDEV_PARAM_IGMPMLD_TID,
+	WMI_10X_PDEV_PARAM_ANTENNA_GAIN,
+	WMI_10X_PDEV_PARAM_RX_DECAP_MODE,
+	WMI_10X_PDEV_PARAM_RX_FILTER,
+	WMI_10X_PDEV_PARAM_SET_MCAST_TO_UCAST_TID,
+	WMI_10X_PDEV_PARAM_PROXY_STA_MODE,
+	WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+	WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+	WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+	WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
+	WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
+	WMI_10X_PDEV_PARAM_CAL_PERIOD,
+	WMI_10X_PDEV_PARAM_ATF_STRICT_SCH,
+	WMI_10X_PDEV_PARAM_ATF_SCHED_DURATION,
+	WMI_10X_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+	WMI_10X_PDEV_PARAM_PDEV_RESET
+};
+
+enum wmi_10_4_pdev_param {
+	WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+	WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+	WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+	WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+	WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+	WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+	WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+	WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+
WMI_10_4_PDEV_PARAM_DYNAMIC_BW, + WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH, + WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH, + WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING, + WMI_10_4_PDEV_PARAM_LTR_ENABLE, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE, + WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + WMI_10_4_PDEV_PARAM_L1SS_ENABLE, + WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_PMF_QOS, + WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE, + WMI_10_4_PDEV_PARAM_DCS, + WMI_10_4_PDEV_PARAM_ANI_ENABLE, + WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD, + WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD, + WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL, + WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL, + WMI_10_4_PDEV_PARAM_DYNTXCHAIN, + WMI_10_4_PDEV_PARAM_PROXY_STA, + WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG, + WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP, + WMI_10_4_PDEV_PARAM_AGGR_BURST, + WMI_10_4_PDEV_PARAM_RX_DECAP_MODE, + WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET, + WMI_10_4_PDEV_PARAM_BURST_DUR, + WMI_10_4_PDEV_PARAM_BURST_ENABLE, + WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA, + WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE, + WMI_10_4_PDEV_PARAM_IGMPMLD_TID, + WMI_10_4_PDEV_PARAM_ANTENNA_GAIN, + WMI_10_4_PDEV_PARAM_RX_FILTER, + WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID, + WMI_10_4_PDEV_PARAM_PROXY_STA_MODE, + WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE, + WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER, + WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, + WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE, + WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE, + WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS, + WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID, + WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID, + WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID, + WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID, + WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID, + WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID, + WMI_10_4_PDEV_PARAM_EN_STATS, + WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY, + WMI_10_4_PDEV_PARAM_NOISE_DETECTION, + WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD, + WMI_10_4_PDEV_PARAM_DPD_ENABLE, + WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO, + WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH, + WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION, + WMI_10_4_PDEV_PARAM_ANT_PLZN, + WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT, + WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL, + WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G, + WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G, + WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU, + WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU, + WMI_10_4_PDEV_PARAM_CCA_THRESHOLD, + WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE, + WMI_10_4_PDEV_PARAM_CAL_PERIOD, + WMI_10_4_PDEV_PARAM_PDEV_RESET, + WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, + WMI_10_4_PDEV_PARAM_ARP_SRCADDR, + WMI_10_4_PDEV_PARAM_ARP_DSTADDR, + WMI_10_4_PDEV_PARAM_TXPOWER_DECR_DB, + WMI_10_4_PDEV_PARAM_RX_BATCHMODE, + WMI_10_4_PDEV_PARAM_PACKET_AGGR_DELAY, + WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH, + WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR, + WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE, + 
WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE, + WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY, + WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX, +}; + struct wmi_pdev_set_param_cmd { __le32 param_id; __le32 param_value; } __packed; +struct wmi_pdev_set_base_macaddr_cmd { + struct wmi_mac_addr mac_addr; +} __packed; + +/* valid period is 1 ~ 60000ms, unit in millisecond */ +#define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000 + struct wmi_pdev_get_tpc_config_cmd { /* parameter */ __le32 param; } __packed; -#define WMI_TPC_RATE_MAX 160 +#define WMI_TPC_CONFIG_PARAM 1 +#define WMI_TPC_FINAL_RATE_MAX 240 #define WMI_TPC_TX_N_CHAIN 4 +#define WMI_TPC_RATE_MAX (WMI_TPC_TX_N_CHAIN * 65) +#define WMI_TPC_PREAM_TABLE_MAX 10 +#define WMI_TPC_FLAG 3 +#define WMI_TPC_BUF_SIZE 10 +#define WMI_TPC_BEAMFORMING 2 + +enum wmi_tpc_table_type { + WMI_TPC_TABLE_TYPE_CDD = 0, + WMI_TPC_TABLE_TYPE_STBC = 1, + WMI_TPC_TABLE_TYPE_TXBF = 2, +}; enum wmi_tpc_config_event_flag { WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1, @@ -1588,7 +4219,7 @@ struct wmi_pdev_tpc_config_event { __le32 phy_mode; __le32 twice_antenna_reduction; __le32 twice_max_rd_power; - s32 twice_antenna_gain; + a_sle32 twice_antenna_gain; __le32 power_limit; __le32 rate_max; __le32 num_tx_chain; @@ -1611,16 +4242,56 @@ enum wmi_tp_scale { WMI_TP_SCALE_SIZE = 5, /* max num of enum */ }; -struct wmi_set_channel_cmd { - /* channel (only frequency and mode info are used) */ - struct wmi_channel chan; +struct wmi_pdev_tpc_final_table_event { + __le32 reg_domain; + __le32 chan_freq; + __le32 phy_mode; + __le32 twice_antenna_reduction; + __le32 twice_max_rd_power; + a_sle32 twice_antenna_gain; + __le32 power_limit; + __le32 rate_max; + __le32 num_tx_chain; + __le32 ctl; + __le32 flags; + s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN]; + s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN]; + s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN]; + s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN]; + u8 rates_array[WMI_TPC_FINAL_RATE_MAX]; + u8 ctl_power_table[WMI_TPC_BEAMFORMING][WMI_TPC_TX_N_CHAIN] + [WMI_TPC_TX_N_CHAIN]; } __packed; -struct wmi_pdev_chanlist_update_event { - /* number of channels */ - __le32 num_chan; - /* array of channels */ - struct wmi_channel channel_list[1]; +struct wmi_pdev_get_tpc_table_cmd { + __le32 param; +} __packed; + +enum wmi_tpc_pream_2ghz { + WMI_TPC_PREAM_2GHZ_CCK = 0, + WMI_TPC_PREAM_2GHZ_OFDM, + WMI_TPC_PREAM_2GHZ_HT20, + WMI_TPC_PREAM_2GHZ_HT40, + WMI_TPC_PREAM_2GHZ_VHT20, + WMI_TPC_PREAM_2GHZ_VHT40, + WMI_TPC_PREAM_2GHZ_VHT80, +}; + +enum wmi_tpc_pream_5ghz { + WMI_TPC_PREAM_5GHZ_OFDM = 1, + WMI_TPC_PREAM_5GHZ_HT20, + WMI_TPC_PREAM_5GHZ_HT40, + WMI_TPC_PREAM_5GHZ_VHT20, + WMI_TPC_PREAM_5GHZ_VHT40, + WMI_TPC_PREAM_5GHZ_VHT80, + WMI_TPC_PREAM_5GHZ_HTCUP, +}; + +#define WMI_PEER_PS_STATE_DISABLED 2 + +struct wmi_peer_sta_ps_state_chg_event { + struct wmi_mac_addr peer_macaddr; + __le32 peer_ps_state; } __packed; #define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32) @@ -1646,6 +4317,10 @@ struct wmi_pdev_set_channel_cmd { struct wmi_channel chan; } __packed; +struct wmi_pdev_pktlog_enable_cmd { + __le32 ev_bitmap; +} __packed; + /* Customize the DSCP (bit) to TID (0-7) mapping for QOS */ #define WMI_DSCP_MAP_MAX (64) struct wmi_pdev_set_dscp_tid_map_cmd { @@ -1688,14 +4363,84 @@ struct wmi_wmm_params_arg { u32 no_ack; }; -struct wmi_pdev_set_wmm_params_arg { +struct wmi_wmm_params_all_arg { struct wmi_wmm_params_arg ac_be; struct wmi_wmm_params_arg ac_bk; struct wmi_wmm_params_arg ac_vi; struct wmi_wmm_params_arg 
ac_vo;
 };
 
-struct wal_dbg_tx_stats {
+struct wmi_pdev_stats_tx {
+	/* Num HTT cookies queued to dispatch list */
+	__le32 comp_queued;
+
+	/* Num HTT cookies dispatched */
+	__le32 comp_delivered;
+
+	/* Num MSDU queued to WAL */
+	__le32 msdu_enqued;
+
+	/* Num MPDUs queued to WAL */
+	__le32 mpdu_enqued;
+
+	/* Num MSDUs dropped by WMM limit */
+	__le32 wmm_drop;
+
+	/* Num Local frames queued */
+	__le32 local_enqued;
+
+	/* Num Local frames done */
+	__le32 local_freed;
+
+	/* Num queued to HW */
+	__le32 hw_queued;
+
+	/* Num PPDU reaped from HW */
+	__le32 hw_reaped;
+
+	/* Num underruns */
+	__le32 underrun;
+
+	/* Num PPDUs cleaned up in TX abort */
+	__le32 tx_abort;
+
+	/* Num MPDUs requeued by SW */
+	__le32 mpdus_requeued;
+
+	/* excessive retries */
+	__le32 tx_ko;
+
+	/* data hw rate code */
+	__le32 data_rc;
+
+	/* Scheduler self triggers */
+	__le32 self_triggers;
+
+	/* frames dropped due to excessive sw retries */
+	__le32 sw_retry_failure;
+
+	/* illegal rate phy errors */
+	__le32 illgl_rate_phy_err;
+
+	/* wal pdev continuous xretry */
+	__le32 pdev_cont_xretry;
+
+	/* wal pdev tx timeouts */
+	__le32 pdev_tx_timeout;
+
+	/* wal pdev resets */
+	__le32 pdev_resets;
+
+	/* frames dropped due to non-availability of stateless TIDs */
+	__le32 stateless_tid_alloc_failure;
+
+	__le32 phy_underrun;
+
+	/* MPDU is more than txop limit */
+	__le32 txop_ovf;
+} __packed;
+
+struct wmi_10_4_pdev_stats_tx {
 	/* Num HTT cookies queued to dispatch list */
 	__le32 comp_queued;
@@ -1726,11 +4471,14 @@ struct wal_dbg_tx_stats {
 	/* Num underruns */
 	__le32 underrun;
 
+	/* HW Paused. */
+	__le32 hw_paused;
+
 	/* Num PPDUs cleaned up in TX abort */
 	__le32 tx_abort;
 
-	/* Num MPDUs requed by SW */
-	__le32 mpdus_requed;
+	/* Num MPDUs requeued by SW */
+	__le32 mpdus_requeued;
 
 	/* excessive retries */
 	__le32 tx_ko;
@@ -1747,22 +4495,57 @@ struct wal_dbg_tx_stats {
 	/* illegal rate phy errors */
 	__le32 illgl_rate_phy_err;
 
-	/* wal pdev continous xretry */
+	/* wal pdev continuous xretry */
 	__le32 pdev_cont_xretry;
 
-	/* wal pdev continous xretry */
+	/* wal pdev tx timeouts */
 	__le32 pdev_tx_timeout;
 
 	/* wal pdev resets */
 	__le32 pdev_resets;
 
+	/* frames dropped due to non-availability of stateless TIDs */
+	__le32 stateless_tid_alloc_failure;
+
 	__le32 phy_underrun;
 
 	/* MPDU is more than txop limit */
 	__le32 txop_ovf;
+
+	/* Number of Sequences posted */
+	__le32 seq_posted;
+
+	/* Number of Sequences failed queueing */
+	__le32 seq_failed_queueing;
+
+	/* Number of Sequences completed */
+	__le32 seq_completed;
+
+	/* Number of Sequences restarted */
+	__le32 seq_restarted;
+
+	/* Number of MU Sequences posted */
+	__le32 mu_seq_posted;
+
+	/* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT (Reset, channel change) */
+	__le32 mpdus_sw_flush;
+
+	/* Num MPDUs filtered by HW, all filter condition (TTL expired) */
+	__le32 mpdus_hw_filter;
+
+	/* Num MPDUs truncated by PDG
+	 * (TXOP, TBTT, PPDU_duration based on rate, dyn_bw)
+	 */
+	__le32 mpdus_truncated;
+
+	/* Num MPDUs that were tried but didn't receive ACK or BA */
+	__le32 mpdus_ack_failed;
+
+	/* Num MPDUs that were dropped due to expiry.
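+	 * (i.e. the frame lifetime was exceeded before the MPDU could be
+	 * transmitted)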
*/ + __le32 mpdus_expired; } __packed; -struct wal_dbg_rx_stats { +struct wmi_pdev_stats_rx { /* Cnts any change in ring routing mid-ppdu */ __le32 mid_ppdu_route_change; @@ -1796,31 +4579,60 @@ struct wal_dbg_rx_stats { __le32 mpdu_errs; } __packed; -struct wal_dbg_peer_stats { +struct wmi_pdev_stats_peer { /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ __le32 dummy; } __packed; -struct wal_dbg_stats { - struct wal_dbg_tx_stats tx; - struct wal_dbg_rx_stats rx; - struct wal_dbg_peer_stats peer; -} __packed; - enum wmi_stats_id { - WMI_REQUEST_PEER_STAT = 0x01, - WMI_REQUEST_AP_STAT = 0x02 + WMI_STAT_PEER = BIT(0), + WMI_STAT_AP = BIT(1), + WMI_STAT_PDEV = BIT(2), + WMI_STAT_VDEV = BIT(3), + WMI_STAT_BCNFLT = BIT(4), + WMI_STAT_VDEV_RATE = BIT(5), +}; + +enum wmi_10_4_stats_id { + WMI_10_4_STAT_PEER = BIT(0), + WMI_10_4_STAT_AP = BIT(1), + WMI_10_4_STAT_INST = BIT(2), + WMI_10_4_STAT_PEER_EXTD = BIT(3), + WMI_10_4_STAT_VDEV_EXTD = BIT(4), +}; + +enum wmi_tlv_stats_id { + WMI_TLV_STAT_PEER = BIT(0), + WMI_TLV_STAT_AP = BIT(1), + WMI_TLV_STAT_PDEV = BIT(2), + WMI_TLV_STAT_VDEV = BIT(3), + WMI_TLV_STAT_PEER_EXTD = BIT(10), +}; + +struct wlan_inst_rssi_args { + __le16 cfg_retry_count; + __le16 retry_count; }; struct wmi_request_stats_cmd { __le32 stats_id; - /* - * Space to add parameters like - * peer mac addr - */ + __le32 vdev_id; + + /* peer MAC address */ + struct wmi_mac_addr peer_macaddr; + + /* Instantaneous RSSI arguments */ + struct wlan_inst_rssi_args inst_rssi_args; } __packed; +enum wmi_peer_stats_info_request_type { + /* request stats of one specified peer */ + WMI_REQUEST_ONE_PEER_STATS_INFO = 0x01, + /* request stats of all peers belong to specified VDEV */ + WMI_REQUEST_VDEV_ALL_PEER_STATS_INFO = 0x02, +}; + /* Suspend option */ enum { /* suspend */ @@ -1836,7 +4648,7 @@ struct wmi_pdev_suspend_cmd { } __packed; struct wmi_stats_event { - __le32 stats_id; /* %WMI_REQUEST_ */ + __le32 stats_id; /* WMI_STAT_ */ /* * number of pdev stats event structures * (wmi_pdev_stats) 0 or 1 @@ -1862,32 +4674,115 @@ struct wmi_stats_event { * By having a zero sized array, the pointer to data area * becomes available without increasing the struct size */ - u8 data[0]; + u8 data[]; +} __packed; + +struct wmi_10_2_stats_event { + __le32 stats_id; /* %WMI_REQUEST_ */ + __le32 num_pdev_stats; + __le32 num_pdev_ext_stats; + __le32 num_vdev_stats; + __le32 num_peer_stats; + __le32 num_bcnflt_stats; + u8 data[]; } __packed; /* * PDEV statistics * TODO: add all PDEV stats here */ +struct wmi_pdev_stats_base { + __le32 chan_nf; + __le32 tx_frame_count; /* Cycles spent transmitting frames */ + __le32 rx_frame_count; /* Cycles spent receiving frames */ + __le32 rx_clear_count; /* Total channel busy time, evidently */ + __le32 cycle_count; /* Total on-channel time */ + __le32 phy_err_count; + __le32 chan_tx_pwr; +} __packed; + struct wmi_pdev_stats { - __le32 chan_nf; /* Channel noise floor */ - __le32 tx_frame_count; /* TX frame count */ - __le32 rx_frame_count; /* RX frame count */ - __le32 rx_clear_count; /* rx clear count */ - __le32 cycle_count; /* cycle count */ - __le32 phy_err_count; /* Phy error count */ - __le32 chan_tx_pwr; /* channel tx power */ - struct wal_dbg_stats wal; /* WAL dbg stats */ + struct wmi_pdev_stats_base base; + struct wmi_pdev_stats_tx tx; + struct wmi_pdev_stats_rx rx; + struct wmi_pdev_stats_peer peer; +} __packed; + +struct wmi_pdev_stats_extra { + __le32 ack_rx_bad; + __le32 rts_bad; + __le32 rts_good; + __le32 fcs_bad; + __le32 no_beacons; + __le32 
mib_int_count; +} __packed; + +struct wmi_10x_pdev_stats { + struct wmi_pdev_stats_base base; + struct wmi_pdev_stats_tx tx; + struct wmi_pdev_stats_rx rx; + struct wmi_pdev_stats_peer peer; + struct wmi_pdev_stats_extra extra; +} __packed; + +struct wmi_pdev_stats_mem { + __le32 dram_free; + __le32 iram_free; +} __packed; + +struct wmi_10_2_pdev_stats { + struct wmi_pdev_stats_base base; + struct wmi_pdev_stats_tx tx; + __le32 mc_drop; + struct wmi_pdev_stats_rx rx; + __le32 pdev_rx_timeout; + struct wmi_pdev_stats_mem mem; + struct wmi_pdev_stats_peer peer; + struct wmi_pdev_stats_extra extra; +} __packed; + +struct wmi_10_4_pdev_stats { + struct wmi_pdev_stats_base base; + struct wmi_10_4_pdev_stats_tx tx; + struct wmi_pdev_stats_rx rx; + __le32 rx_ovfl_errs; + struct wmi_pdev_stats_mem mem; + __le32 sram_free_size; + struct wmi_pdev_stats_extra extra; } __packed; /* * VDEV statistics - * TODO: add all VDEV stats here */ + +#define WMI_VDEV_STATS_FTM_COUNT_VALID BIT(31) +#define WMI_VDEV_STATS_FTM_COUNT_LSB 0 +#define WMI_VDEV_STATS_FTM_COUNT_MASK 0x7fffffff + struct wmi_vdev_stats { __le32 vdev_id; } __packed; +struct wmi_vdev_stats_extd { + __le32 vdev_id; + __le32 ppdu_aggr_cnt; + __le32 ppdu_noack; + __le32 mpdu_queued; + __le32 ppdu_nonaggr_cnt; + __le32 mpdu_sw_requeued; + __le32 mpdu_suc_retry; + __le32 mpdu_suc_multitry; + __le32 mpdu_fail_retry; + __le32 tx_ftm_suc; + __le32 tx_ftm_suc_retry; + __le32 tx_ftm_fail; + __le32 rx_ftmr_cnt; + __le32 rx_ftmr_dup_cnt; + __le32 rx_iftmr_cnt; + __le32 rx_iftmr_dup_cnt; + __le32 reserved[6]; +} __packed; + /* * peer statistics. * TODO: add more stats @@ -1898,6 +4793,81 @@ struct wmi_peer_stats { __le32 peer_tx_rate; } __packed; +struct wmi_10x_peer_stats { + struct wmi_peer_stats old; + __le32 peer_rx_rate; +} __packed; + +struct wmi_10_2_peer_stats { + struct wmi_peer_stats old; + __le32 peer_rx_rate; + __le32 current_per; + __le32 retries; + __le32 tx_rate_count; + __le32 max_4ms_frame_len; + __le32 total_sub_frames; + __le32 tx_bytes; + __le32 num_pkt_loss_overflow[4]; + __le32 num_pkt_loss_excess_retry[4]; +} __packed; + +struct wmi_10_2_4_peer_stats { + struct wmi_10_2_peer_stats common; + __le32 peer_rssi_changed; +} __packed; + +struct wmi_10_2_4_ext_peer_stats { + struct wmi_10_2_peer_stats common; + __le32 peer_rssi_changed; + __le32 rx_duration; +} __packed; + +struct wmi_10_4_peer_stats { + struct wmi_mac_addr peer_macaddr; + __le32 peer_rssi; + __le32 peer_rssi_seq_num; + __le32 peer_tx_rate; + __le32 peer_rx_rate; + __le32 current_per; + __le32 retries; + __le32 tx_rate_count; + __le32 max_4ms_frame_len; + __le32 total_sub_frames; + __le32 tx_bytes; + __le32 num_pkt_loss_overflow[4]; + __le32 num_pkt_loss_excess_retry[4]; + __le32 peer_rssi_changed; +} __packed; + +struct wmi_10_4_peer_extd_stats { + struct wmi_mac_addr peer_macaddr; + __le32 inactive_time; + __le32 peer_chain_rssi; + __le32 rx_duration; + __le32 reserved[10]; +} __packed; + +struct wmi_10_4_bss_bcn_stats { + __le32 vdev_id; + __le32 bss_bcns_dropped; + __le32 bss_bcn_delivered; +} __packed; + +struct wmi_10_4_bss_bcn_filter_stats { + __le32 bcns_dropped; + __le32 bcns_delivered; + __le32 active_filters; + struct wmi_10_4_bss_bcn_stats bss_stats; +} __packed; + +struct wmi_10_2_pdev_ext_stats { + __le32 rx_rssi_comb; + __le32 rx_rssi[4]; + __le32 rx_mcs[10]; + __le32 tx_mcs[10]; + __le32 ack_rssi; +} __packed; + struct wmi_vdev_create_cmd { __le32 vdev_id; __le32 vdev_type; @@ -1913,10 +4883,40 @@ enum wmi_vdev_type { }; enum wmi_vdev_subtype { - 
WMI_VDEV_SUBTYPE_NONE = 0, - WMI_VDEV_SUBTYPE_P2P_DEVICE = 1, - WMI_VDEV_SUBTYPE_P2P_CLIENT = 2, - WMI_VDEV_SUBTYPE_P2P_GO = 3, + WMI_VDEV_SUBTYPE_NONE, + WMI_VDEV_SUBTYPE_P2P_DEVICE, + WMI_VDEV_SUBTYPE_P2P_CLIENT, + WMI_VDEV_SUBTYPE_P2P_GO, + WMI_VDEV_SUBTYPE_PROXY_STA, + WMI_VDEV_SUBTYPE_MESH_11S, + WMI_VDEV_SUBTYPE_MESH_NON_11S, +}; + +enum wmi_vdev_subtype_legacy { + WMI_VDEV_SUBTYPE_LEGACY_NONE = 0, + WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV = 1, + WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI = 2, + WMI_VDEV_SUBTYPE_LEGACY_P2P_GO = 3, + WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA = 4, +}; + +enum wmi_vdev_subtype_10_2_4 { + WMI_VDEV_SUBTYPE_10_2_4_NONE = 0, + WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV = 1, + WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI = 2, + WMI_VDEV_SUBTYPE_10_2_4_P2P_GO = 3, + WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA = 4, + WMI_VDEV_SUBTYPE_10_2_4_MESH_11S = 5, +}; + +enum wmi_vdev_subtype_10_4 { + WMI_VDEV_SUBTYPE_10_4_NONE = 0, + WMI_VDEV_SUBTYPE_10_4_P2P_DEV = 1, + WMI_VDEV_SUBTYPE_10_4_P2P_CLI = 2, + WMI_VDEV_SUBTYPE_10_4_P2P_GO = 3, + WMI_VDEV_SUBTYPE_10_4_PROXY_STA = 4, + WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S = 5, + WMI_VDEV_SUBTYPE_10_4_MESH_11S = 6, }; /* values for vdev_subtype */ @@ -1924,15 +4924,17 @@ enum wmi_vdev_subtype { /* values for vdev_start_request flags */ /* * Indicates that AP VDEV uses hidden ssid. only valid for - * AP/GO */ -#define WMI_VDEV_START_HIDDEN_SSID (1<<0) + * AP/GO + */ +#define WMI_VDEV_START_HIDDEN_SSID (1 << 0) /* * Indicates if robust management frame/management frame * protection is enabled. For GO/AP vdevs, it indicates that * it may support station/client associations with RMF enabled. * For STA/client vdevs, it indicates that sta will - * associate with AP with RMF enabled. */ -#define WMI_VDEV_START_PMF_ENABLED (1<<1) + * associate with AP with RMF enabled. + */ +#define WMI_VDEV_START_PMF_ENABLED (1 << 1) struct wmi_p2p_noa_descriptor { __le32 type_count; /* 255: continuous schedule, 0: reserved */ @@ -1956,9 +4958,9 @@ struct wmi_vdev_start_request_cmd { __le32 flags; /* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */ struct wmi_ssid ssid; - /* beacon/probe reponse xmit rate. Applicable for SoftAP. */ + /* beacon/probe response xmit rate. Applicable for SoftAP. */ __le32 bcn_tx_rate; - /* beacon/probe reponse xmit power. Applicable for SoftAP. */ + /* beacon/probe response xmit power. Applicable for SoftAP. 
*/ __le32 bcn_tx_power; /* number of p2p NOA descriptor(s) from scan entry */ __le32 num_noa_descriptors; @@ -2036,14 +5038,30 @@ struct wmi_key_seq_counter { __le32 key_seq_counter_h; } __packed; -#define WMI_CIPHER_NONE 0x0 /* clear key */ -#define WMI_CIPHER_WEP 0x1 -#define WMI_CIPHER_TKIP 0x2 -#define WMI_CIPHER_AES_OCB 0x3 -#define WMI_CIPHER_AES_CCM 0x4 -#define WMI_CIPHER_WAPI 0x5 -#define WMI_CIPHER_CKIP 0x6 -#define WMI_CIPHER_AES_CMAC 0x7 +enum wmi_cipher_suites { + WMI_CIPHER_NONE, + WMI_CIPHER_WEP, + WMI_CIPHER_TKIP, + WMI_CIPHER_AES_OCB, + WMI_CIPHER_AES_CCM, + WMI_CIPHER_WAPI, + WMI_CIPHER_CKIP, + WMI_CIPHER_AES_CMAC, + WMI_CIPHER_AES_GCM, +}; + +enum wmi_tlv_cipher_suites { + WMI_TLV_CIPHER_NONE, + WMI_TLV_CIPHER_WEP, + WMI_TLV_CIPHER_TKIP, + WMI_TLV_CIPHER_AES_OCB, + WMI_TLV_CIPHER_AES_CCM, + WMI_TLV_CIPHER_WAPI, + WMI_TLV_CIPHER_CKIP, + WMI_TLV_CIPHER_AES_CMAC, + WMI_TLV_CIPHER_ANY, + WMI_TLV_CIPHER_AES_GCM, +}; struct wmi_vdev_install_key_cmd { __le32 vdev_id; @@ -2061,7 +5079,7 @@ struct wmi_vdev_install_key_cmd { __le32 key_rxmic_len; /* contains key followed by tx mic followed by rx mic */ - u8 key_data[0]; + u8 key_data[]; } __packed; struct wmi_vdev_install_key_arg { @@ -2076,6 +5094,18 @@ struct wmi_vdev_install_key_arg { const void *key_data; }; +/* + * vdev fixed rate format: + * - preamble - b7:b6 - see WMI_RATE_PREMABLE_ + * - nss - b5:b4 - ss number (0 mean 1ss) + * - rate_mcs - b3:b0 - as below + * CCK: 0 - 11Mbps, 1 - 5,5Mbps, 2 - 2Mbps, 3 - 1Mbps, + * 4 - 11Mbps (s), 5 - 5,5Mbps (s), 6 - 2Mbps (s) + * OFDM: 0 - 48Mbps, 1 - 24Mbps, 2 - 12Mbps, 3 - 6Mbps, + * 4 - 54Mbps, 5 - 36Mbps, 6 - 18Mbps, 7 - 9Mbps + * HT/VHT: MCS index + */ + /* Preamble types to be used with VDEV fixed rate configuration */ enum wmi_rate_preamble { WMI_RATE_PREAMBLE_OFDM, @@ -2084,9 +5114,124 @@ enum wmi_rate_preamble { WMI_RATE_PREAMBLE_VHT, }; +#define ATH10K_HW_NSS(rate) (1 + (((rate) >> 4) & 0x3)) +#define ATH10K_HW_PREAMBLE(rate) (((rate) >> 6) & 0x3) +#define ATH10K_HW_MCS_RATE(rate) ((rate) & 0xf) +#define ATH10K_HW_LEGACY_RATE(rate) ((rate) & 0x3f) +#define ATH10K_HW_BW(flags) (((flags) >> 3) & 0x3) +#define ATH10K_HW_GI(flags) (((flags) >> 5) & 0x1) +#define ATH10K_HW_RATECODE(rate, nss, preamble) \ + (((preamble) << 6) | ((nss) << 4) | (rate)) +#define ATH10K_HW_AMPDU(flags) ((flags) & 0x1) +#define ATH10K_HW_BA_FAIL(flags) (((flags) >> 1) & 0x3) +#define ATH10K_FW_SKIPPED_RATE_CTRL(flags) (((flags) >> 6) & 0x1) + +#define ATH10K_VHT_MCS_NUM 10 +#define ATH10K_BW_NUM 6 +#define ATH10K_NSS_NUM 4 +#define ATH10K_LEGACY_NUM 12 +#define ATH10K_GI_NUM 2 +#define ATH10K_HT_MCS_NUM 32 +#define ATH10K_RATE_TABLE_NUM 320 +#define ATH10K_RATE_INFO_FLAGS_SGI_BIT 2 + /* Value to disable fixed rate setting */ #define WMI_FIXED_RATE_NONE (0xff) +struct wmi_peer_param_map { + u32 smps_state; + u32 ampdu; + u32 authorize; + u32 chan_width; + u32 nss; + u32 use_4addr; + u32 membership; + u32 use_fixed_power; + u32 user_pos; + u32 crit_proto_hint_enabled; + u32 tx_fail_cnt_thr; + u32 set_hw_retry_cts2s; + u32 ibss_atim_win_len; + u32 debug; + u32 phymode; + u32 dummy_var; +}; + +struct wmi_vdev_param_map { + u32 rts_threshold; + u32 fragmentation_threshold; + u32 beacon_interval; + u32 listen_interval; + u32 multicast_rate; + u32 mgmt_tx_rate; + u32 slot_time; + u32 preamble; + u32 swba_time; + u32 wmi_vdev_stats_update_period; + u32 wmi_vdev_pwrsave_ageout_time; + u32 wmi_vdev_host_swba_interval; + u32 dtim_period; + u32 wmi_vdev_oc_scheduler_air_time_limit; + u32 wds; + u32 atim_window; + 
u32 bmiss_count_max; + u32 bmiss_first_bcnt; + u32 bmiss_final_bcnt; + u32 feature_wmm; + u32 chwidth; + u32 chextoffset; + u32 disable_htprotection; + u32 sta_quickkickout; + u32 mgmt_rate; + u32 protection_mode; + u32 fixed_rate; + u32 sgi; + u32 ldpc; + u32 tx_stbc; + u32 rx_stbc; + u32 intra_bss_fwd; + u32 def_keyid; + u32 nss; + u32 bcast_data_rate; + u32 mcast_data_rate; + u32 mcast_indicate; + u32 dhcp_indicate; + u32 unknown_dest_indicate; + u32 ap_keepalive_min_idle_inactive_time_secs; + u32 ap_keepalive_max_idle_inactive_time_secs; + u32 ap_keepalive_max_unresponsive_time_secs; + u32 ap_enable_nawds; + u32 mcast2ucast_set; + u32 enable_rtscts; + u32 txbf; + u32 packet_powersave; + u32 drop_unencry; + u32 tx_encap_type; + u32 ap_detect_out_of_sync_sleeping_sta_time_secs; + u32 rc_num_retries; + u32 cabq_maxdur; + u32 mfptest_set; + u32 rts_fixed_rate; + u32 vht_sgimask; + u32 vht80_ratemask; + u32 early_rx_adjust_enable; + u32 early_rx_tgt_bmiss_num; + u32 early_rx_bmiss_sample_cycle; + u32 early_rx_slop_step; + u32 early_rx_init_slop; + u32 early_rx_adjust_pause; + u32 proxy_sta; + u32 meru_vc; + u32 rx_decap_type; + u32 bw_nss_ratemask; + u32 inc_tsf; + u32 dec_tsf; + u32 disable_4addr_src_lrn; + u32 rtt_responder_role; +}; + +#define WMI_VDEV_PARAM_UNSUPPORTED 0 + /* the definition of different VDEV parameters */ enum wmi_vdev_param { /* RTS Threshold */ @@ -2097,7 +5242,7 @@ enum wmi_vdev_param { WMI_VDEV_PARAM_BEACON_INTERVAL, /* Listen interval in TUs */ WMI_VDEV_PARAM_LISTEN_INTERVAL, - /* muticast rate in Mbps */ + /* multicast rate in Mbps */ WMI_VDEV_PARAM_MULTICAST_RATE, /* management frame rate in Mbps */ WMI_VDEV_PARAM_MGMT_TX_RATE, @@ -2123,7 +5268,7 @@ enum wmi_vdev_param { * scheduler. */ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, - /* enable/dsiable WDS for this VDEV */ + /* enable/disable WDS for this VDEV */ WMI_VDEV_PARAM_WDS, /* ATIM Window */ WMI_VDEV_PARAM_ATIM_WINDOW, @@ -2193,7 +5338,8 @@ enum wmi_vdev_param { * An associated STA is considered unresponsive if there is no recent * TX/RX activity and downlink frames are buffered for it. Once a STA * exceeds the maximum unresponsive time, the AP will send a - * WMI_STA_KICKOUT event to the host so the STA can be deleted. */ + * WMI_STA_KICKOUT event to the host so the STA can be deleted. + */ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */ @@ -2218,6 +5364,234 @@ enum wmi_vdev_param { WMI_VDEV_PARAM_TX_ENCAP_TYPE, }; +/* the definition of different VDEV parameters */ +enum wmi_10x_vdev_param { + /* RTS Threshold */ + WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1, + /* Fragmentation threshold */ + WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + /* beacon interval in TUs */ + WMI_10X_VDEV_PARAM_BEACON_INTERVAL, + /* Listen interval in TUs */ + WMI_10X_VDEV_PARAM_LISTEN_INTERVAL, + /* multicast rate in Mbps */ + WMI_10X_VDEV_PARAM_MULTICAST_RATE, + /* management frame rate in Mbps */ + WMI_10X_VDEV_PARAM_MGMT_TX_RATE, + /* slot time (long vs short) */ + WMI_10X_VDEV_PARAM_SLOT_TIME, + /* preamble (long vs short) */ + WMI_10X_VDEV_PARAM_PREAMBLE, + /* SWBA time (time before tbtt in msec) */ + WMI_10X_VDEV_PARAM_SWBA_TIME, + /* time period for updating VDEV stats */ + WMI_10X_VDEV_STATS_UPDATE_PERIOD, + /* age out time in msec for frames queued for station in power save */ + WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME, + /* + * Host SWBA interval (time in msec before tbtt for SWBA event + * generation). 
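+	 * (SWBA = software beacon alert)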
+ */ + WMI_10X_VDEV_HOST_SWBA_INTERVAL, + /* DTIM period (specified in units of num beacon intervals) */ + WMI_10X_VDEV_PARAM_DTIM_PERIOD, + /* + * scheduler air time limit for this VDEV. used by off chan + * scheduler. + */ + WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + /* enable/disable WDS for this VDEV */ + WMI_10X_VDEV_PARAM_WDS, + /* ATIM Window */ + WMI_10X_VDEV_PARAM_ATIM_WINDOW, + /* BMISS max */ + WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX, + /* WMM enables/disabled */ + WMI_10X_VDEV_PARAM_FEATURE_WMM, + /* Channel width */ + WMI_10X_VDEV_PARAM_CHWIDTH, + /* Channel Offset */ + WMI_10X_VDEV_PARAM_CHEXTOFFSET, + /* Disable HT Protection */ + WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION, + /* Quick STA Kickout */ + WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT, + /* Rate to be used with Management frames */ + WMI_10X_VDEV_PARAM_MGMT_RATE, + /* Protection Mode */ + WMI_10X_VDEV_PARAM_PROTECTION_MODE, + /* Fixed rate setting */ + WMI_10X_VDEV_PARAM_FIXED_RATE, + /* Short GI Enable/Disable */ + WMI_10X_VDEV_PARAM_SGI, + /* Enable LDPC */ + WMI_10X_VDEV_PARAM_LDPC, + /* Enable Tx STBC */ + WMI_10X_VDEV_PARAM_TX_STBC, + /* Enable Rx STBC */ + WMI_10X_VDEV_PARAM_RX_STBC, + /* Intra BSS forwarding */ + WMI_10X_VDEV_PARAM_INTRA_BSS_FWD, + /* Setting Default xmit key for Vdev */ + WMI_10X_VDEV_PARAM_DEF_KEYID, + /* NSS width */ + WMI_10X_VDEV_PARAM_NSS, + /* Set the custom rate for the broadcast data frames */ + WMI_10X_VDEV_PARAM_BCAST_DATA_RATE, + /* Set the custom rate (rate-code) for multicast data frames */ + WMI_10X_VDEV_PARAM_MCAST_DATA_RATE, + /* Tx multicast packet indicate Enable/Disable */ + WMI_10X_VDEV_PARAM_MCAST_INDICATE, + /* Tx DHCP packet indicate Enable/Disable */ + WMI_10X_VDEV_PARAM_DHCP_INDICATE, + /* Enable host inspection of Tx unicast packet to unknown destination */ + WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + + /* The minimum amount of time AP begins to consider STA inactive */ + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + + /* + * An associated STA is considered inactive when there is no recent + * TX/RX activity and no downlink frames are buffered for it. Once a + * STA exceeds the maximum idle inactive time, the AP will send an + * 802.11 data-null as a keep alive to verify the STA is still + * associated. If the STA does ACK the data-null, or if the data-null + * is buffered and the STA does not retrieve it, the STA will be + * considered unresponsive + * (see WMI_10X_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS). + */ + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + + /* + * An associated STA is considered unresponsive if there is no recent + * TX/RX activity and downlink frames are buffered for it. Once a STA + * exceeds the maximum unresponsive time, the AP will send a + * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. 
+ */ + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + + /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */ + WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS, + + WMI_10X_VDEV_PARAM_MCAST2UCAST_SET, + /* Enable/Disable RTS-CTS */ + WMI_10X_VDEV_PARAM_ENABLE_RTSCTS, + + WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + + /* following are available as of firmware 10.2 */ + WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE, + WMI_10X_VDEV_PARAM_CABQ_MAXDUR, + WMI_10X_VDEV_PARAM_MFPTEST_SET, + WMI_10X_VDEV_PARAM_RTS_FIXED_RATE, + WMI_10X_VDEV_PARAM_VHT_SGIMASK, + WMI_10X_VDEV_PARAM_VHT80_RATEMASK, + WMI_10X_VDEV_PARAM_TSF_INCREMENT, +}; + +enum wmi_10_4_vdev_param { + WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1, + WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + WMI_10_4_VDEV_PARAM_BEACON_INTERVAL, + WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL, + WMI_10_4_VDEV_PARAM_MULTICAST_RATE, + WMI_10_4_VDEV_PARAM_MGMT_TX_RATE, + WMI_10_4_VDEV_PARAM_SLOT_TIME, + WMI_10_4_VDEV_PARAM_PREAMBLE, + WMI_10_4_VDEV_PARAM_SWBA_TIME, + WMI_10_4_VDEV_STATS_UPDATE_PERIOD, + WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME, + WMI_10_4_VDEV_HOST_SWBA_INTERVAL, + WMI_10_4_VDEV_PARAM_DTIM_PERIOD, + WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + WMI_10_4_VDEV_PARAM_WDS, + WMI_10_4_VDEV_PARAM_ATIM_WINDOW, + WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX, + WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT, + WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT, + WMI_10_4_VDEV_PARAM_FEATURE_WMM, + WMI_10_4_VDEV_PARAM_CHWIDTH, + WMI_10_4_VDEV_PARAM_CHEXTOFFSET, + WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION, + WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT, + WMI_10_4_VDEV_PARAM_MGMT_RATE, + WMI_10_4_VDEV_PARAM_PROTECTION_MODE, + WMI_10_4_VDEV_PARAM_FIXED_RATE, + WMI_10_4_VDEV_PARAM_SGI, + WMI_10_4_VDEV_PARAM_LDPC, + WMI_10_4_VDEV_PARAM_TX_STBC, + WMI_10_4_VDEV_PARAM_RX_STBC, + WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD, + WMI_10_4_VDEV_PARAM_DEF_KEYID, + WMI_10_4_VDEV_PARAM_NSS, + WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE, + WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE, + WMI_10_4_VDEV_PARAM_MCAST_INDICATE, + WMI_10_4_VDEV_PARAM_DHCP_INDICATE, + WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS, + WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET, + WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS, + WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES, + WMI_10_4_VDEV_PARAM_TXBF, + WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE, + WMI_10_4_VDEV_PARAM_DROP_UNENCRY, + WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE, + WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + WMI_10_4_VDEV_PARAM_CABQ_MAXDUR, + WMI_10_4_VDEV_PARAM_MFPTEST_SET, + WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE, + WMI_10_4_VDEV_PARAM_VHT_SGIMASK, + WMI_10_4_VDEV_PARAM_VHT80_RATEMASK, + WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE, + WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM, + WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE, + WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP, + WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP, + WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE, + WMI_10_4_VDEV_PARAM_PROXY_STA, + WMI_10_4_VDEV_PARAM_MERU_VC, + WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, + WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, + WMI_10_4_VDEV_PARAM_SENSOR_AP, + WMI_10_4_VDEV_PARAM_BEACON_RATE, + WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS, + WMI_10_4_VDEV_PARAM_STA_KICKOUT, + WMI_10_4_VDEV_PARAM_CAPABILITIES, + WMI_10_4_VDEV_PARAM_TSF_INCREMENT, + WMI_10_4_VDEV_PARAM_RX_FILTER, + WMI_10_4_VDEV_PARAM_MGMT_TX_POWER, + 
WMI_10_4_VDEV_PARAM_ATF_SSID_SCHED_POLICY, + WMI_10_4_VDEV_PARAM_DISABLE_DYN_BW_RTS, + WMI_10_4_VDEV_PARAM_TSF_DECREMENT, + WMI_10_4_VDEV_PARAM_SELFGEN_FIXED_RATE, + WMI_10_4_VDEV_PARAM_AMPDU_SUBFRAME_SIZE_PER_AC, + WMI_10_4_VDEV_PARAM_NSS_VHT160, + WMI_10_4_VDEV_PARAM_NSS_VHT80_80, + WMI_10_4_VDEV_PARAM_AMSDU_SUBFRAME_SIZE_PER_AC, + WMI_10_4_VDEV_PARAM_DISABLE_CABQ, + WMI_10_4_VDEV_PARAM_SIFS_TRIGGER_RATE, + WMI_10_4_VDEV_PARAM_TX_POWER, + WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE, + WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN, +}; + +#define WMI_VDEV_DISABLE_4_ADDR_SRC_LRN 1 + +#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0) +#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1) +#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2) +#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) + +#define WMI_TXBF_STS_CAP_OFFSET_LSB 4 +#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70 +#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7) +#define WMI_BF_SOUND_DIM_OFFSET_LSB 8 +#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00 + /* slot time long */ #define WMI_VDEV_SLOT_TIME_LONG 0x1 /* slot time short */ @@ -2264,7 +5638,7 @@ struct wmi_vdev_simple_event { } __packed; /* VDEV start response status codes */ -/* VDEV succesfully started */ +/* VDEV successfully started */ #define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0 /* requested VDEV not found */ @@ -2273,6 +5647,98 @@ struct wmi_vdev_simple_event { /* unsupported VDEV combination */ #define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2 +/* TODO: please add more comments if you have in-depth information */ +struct wmi_vdev_spectral_conf_cmd { + __le32 vdev_id; + + /* number of fft samples to send (0 for infinite) */ + __le32 scan_count; + __le32 scan_period; + __le32 scan_priority; + + /* number of bins in the FFT: 2^(fft_size - bin_scale) */ + __le32 scan_fft_size; + __le32 scan_gc_ena; + __le32 scan_restart_ena; + __le32 scan_noise_floor_ref; + __le32 scan_init_delay; + __le32 scan_nb_tone_thr; + __le32 scan_str_bin_thr; + __le32 scan_wb_rpt_mode; + __le32 scan_rssi_rpt_mode; + __le32 scan_rssi_thr; + __le32 scan_pwr_format; + + /* rpt_mode: Format of FFT report to software for spectral scan + * triggered FFTs: + * 0: No FFT report (only spectral scan summary report) + * 1: 2-dword summary of metrics for each completed FFT + spectral + * scan summary report + * 2: 2-dword summary of metrics for each completed FFT + + * 1x- oversampled bins(in-band) per FFT + spectral scan summary + * report + * 3: 2-dword summary of metrics for each completed FFT + + * 2x- oversampled bins (all) per FFT + spectral scan summary + */ + __le32 scan_rpt_mode; + __le32 scan_bin_scale; + __le32 scan_dbm_adj; + __le32 scan_chn_mask; +} __packed; + +struct wmi_vdev_spectral_conf_arg { + u32 vdev_id; + u32 scan_count; + u32 scan_period; + u32 scan_priority; + u32 scan_fft_size; + u32 scan_gc_ena; + u32 scan_restart_ena; + u32 scan_noise_floor_ref; + u32 scan_init_delay; + u32 scan_nb_tone_thr; + u32 scan_str_bin_thr; + u32 scan_wb_rpt_mode; + u32 scan_rssi_rpt_mode; + u32 scan_rssi_thr; + u32 scan_pwr_format; + u32 scan_rpt_mode; + u32 scan_bin_scale; + u32 scan_dbm_adj; + u32 scan_chn_mask; +}; + +#define WMI_SPECTRAL_ENABLE_DEFAULT 0 +#define WMI_SPECTRAL_COUNT_DEFAULT 0 +#define WMI_SPECTRAL_PERIOD_DEFAULT 35 +#define WMI_SPECTRAL_PRIORITY_DEFAULT 1 +#define WMI_SPECTRAL_FFT_SIZE_DEFAULT 7 +#define WMI_SPECTRAL_GC_ENA_DEFAULT 1 +#define WMI_SPECTRAL_RESTART_ENA_DEFAULT 0 +#define WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96 +#define WMI_SPECTRAL_INIT_DELAY_DEFAULT 80 +#define 
WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12 +#define WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8 +#define WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0 +#define WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0 +#define WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0 +#define WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0 +#define WMI_SPECTRAL_RPT_MODE_DEFAULT 2 +#define WMI_SPECTRAL_BIN_SCALE_DEFAULT 1 +#define WMI_SPECTRAL_DBM_ADJ_DEFAULT 1 +#define WMI_SPECTRAL_CHN_MASK_DEFAULT 1 + +struct wmi_vdev_spectral_enable_cmd { + __le32 vdev_id; + __le32 trigger_cmd; + __le32 enable_cmd; +} __packed; + +#define WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1 +#define WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2 +#define WMI_SPECTRAL_ENABLE_CMD_ENABLE 1 +#define WMI_SPECTRAL_ENABLE_CMD_DISABLE 2 + /* Beacon processing related command and event structures */ struct wmi_bcn_tx_hdr { __le32 vdev_id; @@ -2283,7 +5749,7 @@ struct wmi_bcn_tx_hdr { struct wmi_bcn_tx_cmd { struct wmi_bcn_tx_hdr hdr; - u8 *bcn[0]; + u8 *bcn[]; } __packed; struct wmi_bcn_tx_arg { @@ -2294,6 +5760,31 @@ struct wmi_bcn_tx_arg { const void *bcn; }; +enum wmi_bcn_tx_ref_flags { + WMI_BCN_TX_REF_FLAG_DTIM_ZERO = 0x1, + WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2, +}; + +/* TODO: It is unclear why "no antenna" works while any other seemingly valid + * chainmask yields no beacons on the air at all. + */ +#define WMI_BCN_TX_REF_DEF_ANTENNA 0 + +struct wmi_bcn_tx_ref_cmd { + __le32 vdev_id; + __le32 data_len; + /* physical address of the frame - dma pointer */ + __le32 data_ptr; + /* id for host to track */ + __le32 msdu_id; + /* frame ctrl to setup PPDU desc */ + __le32 frame_control; + /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */ + __le32 flags; + /* introduced in 10.2 */ + __le32 antenna_mask; +} __packed; + /* Beacon filter */ #define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */ #define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */ @@ -2329,30 +5820,6 @@ struct wmi_bcn_prb_info { /* app IE */ } __packed; -struct wmi_bcn_tmpl_cmd { - /* unique id identifying the VDEV, generated by the caller */ - __le32 vdev_id; - /* TIM IE offset from the beginning of the template. */ - __le32 tim_ie_offset; - /* beacon probe capabilities and IEs */ - struct wmi_bcn_prb_info bcn_prb_info; - /* beacon buffer length */ - __le32 buf_len; - /* variable length data */ - u8 data[1]; -} __packed; - -struct wmi_prb_tmpl_cmd { - /* unique id identifying the VDEV, generated by the caller */ - __le32 vdev_id; - /* beacon probe capabilities and IEs */ - struct wmi_bcn_prb_info bcn_prb_info; - /* beacon buffer length */ - __le32 buf_len; - /* Variable length data */ - u8 data[1]; -} __packed; - enum wmi_sta_ps_mode { /* enable power save for the given STA VDEV */ WMI_STA_PS_MODE_DISABLED = 0, @@ -2441,9 +5908,16 @@ enum wmi_sta_ps_param_tx_wake_threshold { enum wmi_sta_ps_param_pspoll_count { WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0, /* - * Values greater than 0 indicate the maximum numer of PS-Poll frames + * Values greater than 0 indicate the maximum number of PS-Poll frames * FW will send before waking up. */ + + /* When u-APSD is enabled the firmware will be very reluctant to exit + * STA PS. This could result in very poor Rx performance with STA doing + * PS-Poll for each and every buffered frame. This value is a bit + * arbitrary. + */ + WMI_STA_PS_PSPOLL_COUNT_UAPSD = 3, }; /* @@ -2456,7 +5930,7 @@ enum wmi_sta_ps_param_pspoll_count { #define WMI_UAPSD_AC_TYPE_TRIG 1 #define WMI_UAPSD_AC_BIT_MASK(ac, type) \ - ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1))) + (type == WMI_UAPSD_AC_TYPE_DELI ? 
1 << (ac << 1) : 1 << ((ac << 1) + 1)) enum wmi_sta_ps_param_uapsd { WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), @@ -2469,6 +5943,30 @@ enum wmi_sta_ps_param_uapsd { WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), }; +#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX + +struct wmi_sta_uapsd_auto_trig_param { + __le32 wmm_ac; + __le32 user_priority; + __le32 service_interval; + __le32 suspend_interval; + __le32 delay_interval; +}; + +struct wmi_sta_uapsd_auto_trig_cmd_fixed_param { + __le32 vdev_id; + struct wmi_mac_addr peer_macaddr; + __le32 num_ac; +}; + +struct wmi_sta_uapsd_auto_trig_arg { + u32 wmm_ac; + u32 user_priority; + u32 service_interval; + u32 suspend_interval; + u32 delay_interval; +}; + enum wmi_sta_powersave_param { /* * Controls how frames are retrievd from AP while STA is sleeping @@ -2606,6 +6104,14 @@ struct wmi_tim_info { __le32 tim_num_ps_pending; } __packed; +struct wmi_tim_info_arg { + __le32 tim_len; + __le32 tim_mcast; + const __le32 *tim_bitmap; + __le32 tim_changed; + __le32 tim_num_ps_pending; +} __packed; + /* Maximum number of NOA Descriptors supported */ #define WMI_P2P_MAX_NOA_DESCRIPTORS 4 #define WMI_P2P_OPPPS_ENABLE_BIT BIT(0) @@ -2614,12 +6120,14 @@ struct wmi_tim_info { struct wmi_p2p_noa_info { /* Bit 0 - Flag to indicate an update in NOA schedule - Bits 7-1 - Reserved */ + * Bits 7-1 - Reserved + */ u8 changed; /* NOA index */ u8 index; /* Bit 0 - Opp PS state of the AP - Bits 1-7 - Ctwindow in TUs */ + * Bits 1-7 - Ctwindow in TUs + */ u8 ctwindow_oppps; /* Number of NOA descriptors */ u8 num_descriptors; @@ -2634,7 +6142,58 @@ struct wmi_bcn_info { struct wmi_host_swba_event { __le32 vdev_map; - struct wmi_bcn_info bcn_info[1]; + struct wmi_bcn_info bcn_info[]; +} __packed; + +struct wmi_10_2_4_bcn_info { + struct wmi_tim_info tim_info; + /* The 10.2.4 FW doesn't have p2p NOA info */ +} __packed; + +struct wmi_10_2_4_host_swba_event { + __le32 vdev_map; + struct wmi_10_2_4_bcn_info bcn_info[]; +} __packed; + +/* 16 words = 512 client + 1 word = for guard */ +#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17 + +struct wmi_10_4_tim_info { + __le32 tim_len; + __le32 tim_mcast; + __le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE]; + __le32 tim_changed; + __le32 tim_num_ps_pending; +} __packed; + +#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1 + +struct wmi_10_4_p2p_noa_info { + /* Bit 0 - Flag to indicate an update in NOA schedule + * Bits 7-1 - Reserved + */ + u8 changed; + /* NOA index */ + u8 index; + /* Bit 0 - Opp PS state of the AP + * Bits 1-7 - Ctwindow in TUs + */ + u8 ctwindow_oppps; + /* Number of NOA descriptors */ + u8 num_descriptors; + + struct wmi_p2p_noa_descriptor + noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS]; +} __packed; + +struct wmi_10_4_bcn_info { + struct wmi_10_4_tim_info tim_info; + struct wmi_10_4_p2p_noa_info p2p_noa_info; +} __packed; + +struct wmi_10_4_host_swba_event { + __le32 vdev_map; + struct wmi_10_4_bcn_info bcn_info[]; } __packed; #define WMI_MAX_AP_VDEV 16 @@ -2644,12 +6203,18 @@ struct wmi_tbtt_offset_event { __le32 tbttoffset_list[WMI_MAX_AP_VDEV]; } __packed; - struct wmi_peer_create_cmd { __le32 vdev_id; struct wmi_mac_addr peer_macaddr; + __le32 peer_type; } __packed; +enum wmi_peer_type { + WMI_PEER_TYPE_DEFAULT = 0, + WMI_PEER_TYPE_BSS = 1, + WMI_PEER_TYPE_TDLS = 2, +}; + struct wmi_peer_delete_cmd { __le32 vdev_id; struct wmi_mac_addr peer_macaddr; @@ -2750,13 +6315,25 @@ enum wmi_peer_smps_state { WMI_PEER_SMPS_DYNAMIC = 0x2 }; +enum wmi_peer_chwidth { + WMI_PEER_CHWIDTH_20MHZ = 0, + WMI_PEER_CHWIDTH_40MHZ = 1, + 
WMI_PEER_CHWIDTH_80MHZ = 2, + WMI_PEER_CHWIDTH_160MHZ = 3, +}; + enum wmi_peer_param { WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */ WMI_PEER_AMPDU = 0x2, WMI_PEER_AUTHORIZE = 0x3, WMI_PEER_CHAN_WIDTH = 0x4, WMI_PEER_NSS = 0x5, - WMI_PEER_USE_4ADDR = 0x6 + WMI_PEER_USE_4ADDR = 0x6, + WMI_PEER_USE_FIXED_PWR = 0x8, + WMI_PEER_PARAM_FIXED_RATE = 0x9, + WMI_PEER_DEBUG = 0xa, + WMI_PEER_PHYMODE = 0xd, + WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */ }; struct wmi_peer_set_param_cmd { @@ -2776,7 +6353,7 @@ struct wmi_rate_set { * the rates are filled from least significant byte to most * significant byte. */ - __le32 rates[(MAX_SUPPORTED_RATES/4)+1]; + __le32 rates[(MAX_SUPPORTED_RATES / 4) + 1]; } __packed; struct wmi_rate_set_arg { @@ -2820,21 +6397,83 @@ struct wmi_peer_set_q_empty_callback_cmd { __le32 callback_enable; } __packed; -#define WMI_PEER_AUTH 0x00000001 -#define WMI_PEER_QOS 0x00000002 -#define WMI_PEER_NEED_PTK_4_WAY 0x00000004 -#define WMI_PEER_NEED_GTK_2_WAY 0x00000010 -#define WMI_PEER_APSD 0x00000800 -#define WMI_PEER_HT 0x00001000 -#define WMI_PEER_40MHZ 0x00002000 -#define WMI_PEER_STBC 0x00008000 -#define WMI_PEER_LDPC 0x00010000 -#define WMI_PEER_DYN_MIMOPS 0x00020000 -#define WMI_PEER_STATIC_MIMOPS 0x00040000 -#define WMI_PEER_SPATIAL_MUX 0x00200000 -#define WMI_PEER_VHT 0x02000000 -#define WMI_PEER_80MHZ 0x04000000 -#define WMI_PEER_PMF 0x08000000 +struct wmi_peer_flags_map { + u32 auth; + u32 qos; + u32 need_ptk_4_way; + u32 need_gtk_2_way; + u32 apsd; + u32 ht; + u32 bw40; + u32 stbc; + u32 ldbc; + u32 dyn_mimops; + u32 static_mimops; + u32 spatial_mux; + u32 vht; + u32 bw80; + u32 vht_2g; + u32 pmf; + u32 bw160; +}; + +enum wmi_peer_flags { + WMI_PEER_AUTH = 0x00000001, + WMI_PEER_QOS = 0x00000002, + WMI_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_PEER_APSD = 0x00000800, + WMI_PEER_HT = 0x00001000, + WMI_PEER_40MHZ = 0x00002000, + WMI_PEER_STBC = 0x00008000, + WMI_PEER_LDPC = 0x00010000, + WMI_PEER_DYN_MIMOPS = 0x00020000, + WMI_PEER_STATIC_MIMOPS = 0x00040000, + WMI_PEER_SPATIAL_MUX = 0x00200000, + WMI_PEER_VHT = 0x02000000, + WMI_PEER_80MHZ = 0x04000000, + WMI_PEER_VHT_2G = 0x08000000, + WMI_PEER_PMF = 0x10000000, + WMI_PEER_160MHZ = 0x20000000 +}; + +enum wmi_10x_peer_flags { + WMI_10X_PEER_AUTH = 0x00000001, + WMI_10X_PEER_QOS = 0x00000002, + WMI_10X_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_10X_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_10X_PEER_APSD = 0x00000800, + WMI_10X_PEER_HT = 0x00001000, + WMI_10X_PEER_40MHZ = 0x00002000, + WMI_10X_PEER_STBC = 0x00008000, + WMI_10X_PEER_LDPC = 0x00010000, + WMI_10X_PEER_DYN_MIMOPS = 0x00020000, + WMI_10X_PEER_STATIC_MIMOPS = 0x00040000, + WMI_10X_PEER_SPATIAL_MUX = 0x00200000, + WMI_10X_PEER_VHT = 0x02000000, + WMI_10X_PEER_80MHZ = 0x04000000, + WMI_10X_PEER_160MHZ = 0x20000000 +}; + +enum wmi_10_2_peer_flags { + WMI_10_2_PEER_AUTH = 0x00000001, + WMI_10_2_PEER_QOS = 0x00000002, + WMI_10_2_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_10_2_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_10_2_PEER_APSD = 0x00000800, + WMI_10_2_PEER_HT = 0x00001000, + WMI_10_2_PEER_40MHZ = 0x00002000, + WMI_10_2_PEER_STBC = 0x00008000, + WMI_10_2_PEER_LDPC = 0x00010000, + WMI_10_2_PEER_DYN_MIMOPS = 0x00020000, + WMI_10_2_PEER_STATIC_MIMOPS = 0x00040000, + WMI_10_2_PEER_SPATIAL_MUX = 0x00200000, + WMI_10_2_PEER_VHT = 0x02000000, + WMI_10_2_PEER_80MHZ = 0x04000000, + WMI_10_2_PEER_VHT_2G = 0x08000000, + WMI_10_2_PEER_PMF = 0x10000000, + WMI_10_2_PEER_160MHZ = 0x20000000 +}; /* * Peer rate 
capabilities. @@ -2858,7 +6497,7 @@ struct wmi_peer_set_q_empty_callback_cmd { /* Maximum listen interval supported by hw in units of beacon interval */ #define ATH10K_MAX_HW_LISTEN_INTERVAL 5 -struct wmi_peer_assoc_complete_cmd { +struct wmi_common_peer_assoc_complete_cmd { struct wmi_mac_addr peer_macaddr; __le32 vdev_id; __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */ @@ -2876,11 +6515,41 @@ struct wmi_peer_assoc_complete_cmd { __le32 peer_vht_caps; __le32 peer_phymode; struct wmi_vht_rate_set peer_vht_rates; +}; + +struct wmi_main_peer_assoc_complete_cmd { + struct wmi_common_peer_assoc_complete_cmd cmd; + /* HT Operation Element of the peer. Five bytes packed in 2 - * INT32 array and filled from lsb to msb. */ + * INT32 array and filled from lsb to msb. + */ __le32 peer_ht_info[2]; } __packed; +struct wmi_10_1_peer_assoc_complete_cmd { + struct wmi_common_peer_assoc_complete_cmd cmd; +} __packed; + +#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB 0 +#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK 0x0f +#define WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB 4 +#define WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK 0xf0 + +struct wmi_10_2_peer_assoc_complete_cmd { + struct wmi_common_peer_assoc_complete_cmd cmd; + __le32 info0; /* WMI_PEER_ASSOC_INFO0_ */ +} __packed; + +/* NSS Mapping to FW */ +#define WMI_PEER_NSS_MAP_ENABLE BIT(31) +#define WMI_PEER_NSS_160MHZ_MASK GENMASK(2, 0) +#define WMI_PEER_NSS_80_80MHZ_MASK GENMASK(5, 3) + +struct wmi_10_4_peer_assoc_complete_cmd { + struct wmi_10_2_peer_assoc_complete_cmd cmd; + __le32 peer_bw_rxnss_override; +} __packed; + struct wmi_peer_assoc_complete_arg { u8 addr[ETH_ALEN]; u32 vdev_id; @@ -2899,6 +6568,7 @@ struct wmi_peer_assoc_complete_arg { u32 peer_vht_caps; enum wmi_phy_mode peer_phymode; struct wmi_vht_rate_set_arg peer_vht_rates; + u32 peer_bw_rxnss_override; }; struct wmi_peer_add_wds_entry_cmd { @@ -2930,6 +6600,25 @@ struct wmi_chan_info_event { __le32 cycle_count; } __packed; +struct wmi_10_4_chan_info_event { + __le32 err_code; + __le32 freq; + __le32 cmd_flags; + __le32 noise_floor; + __le32 rx_clear_count; + __le32 cycle_count; + __le32 chan_tx_pwr_range; + __le32 chan_tx_pwr_tp; + __le32 rx_frame_count; +} __packed; + +struct wmi_peer_sta_kickout_event { + struct wmi_mac_addr peer_macaddr; +} __packed; + +#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0) +#define WMI_CHAN_INFO_FLAG_PRE_COMPLETE BIT(1) + /* Beacon filter wmi command info */ #define BCN_FLT_MAX_SUPPORTED_IES 256 #define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32) @@ -2957,6 +6646,11 @@ enum wmi_sta_keepalive_method { WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2, }; +#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 + +/* Firmware crashes if keepalive interval exceeds this limit */ +#define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff + /* note: ip4 addresses are in network byte order, i.e. 
big endian */ struct wmi_sta_keepalive_arp_resp { __be32 src_ip4_addr; @@ -2972,81 +6666,877 @@ struct wmi_sta_keepalive_cmd { struct wmi_sta_keepalive_arp_resp arp_resp; } __packed; -#define ATH10K_RTS_MAX 2347 +struct wmi_sta_keepalive_arg { + u32 vdev_id; + u32 enabled; + u32 method; + u32 interval; + __be32 src_ip4_addr; + __be32 dest_ip4_addr; + const u8 dest_mac_addr[ETH_ALEN]; +}; + +enum wmi_force_fw_hang_type { + WMI_FORCE_FW_HANG_ASSERT = 1, + WMI_FORCE_FW_HANG_NO_DETECT, + WMI_FORCE_FW_HANG_CTRL_EP_FULL, + WMI_FORCE_FW_HANG_EMPTY_POINT, + WMI_FORCE_FW_HANG_STACK_OVERFLOW, + WMI_FORCE_FW_HANG_INFINITE_LOOP, +}; + +#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF + +struct wmi_force_fw_hang_cmd { + __le32 type; + __le32 delay_ms; +} __packed; + +enum wmi_pdev_reset_mode_type { + WMI_RST_MODE_TX_FLUSH = 1, + WMI_RST_MODE_WARM_RESET, + WMI_RST_MODE_COLD_RESET, + WMI_RST_MODE_WARM_RESET_RESTORE_CAL, + WMI_RST_MODE_COLD_RESET_RESTORE_CAL, + WMI_RST_MODE_MAX, +}; + +enum ath10k_dbglog_level { + ATH10K_DBGLOG_LEVEL_VERBOSE = 0, + ATH10K_DBGLOG_LEVEL_INFO = 1, + ATH10K_DBGLOG_LEVEL_WARN = 2, + ATH10K_DBGLOG_LEVEL_ERR = 3, +}; + +/* VAP ids to enable dbglog */ +#define ATH10K_DBGLOG_CFG_VAP_LOG_LSB 0 +#define ATH10K_DBGLOG_CFG_VAP_LOG_MASK 0x0000ffff + +/* to enable dbglog in the firmware */ +#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB 16 +#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK 0x00010000 + +/* timestamp resolution */ +#define ATH10K_DBGLOG_CFG_RESOLUTION_LSB 17 +#define ATH10K_DBGLOG_CFG_RESOLUTION_MASK 0x000E0000 + +/* number of queued messages before sending them to the host */ +#define ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB 20 +#define ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK 0x0ff00000 + +/* + * Log levels to enable. This defines the minimum level to enable, this is + * not a bitmask. See enum ath10k_dbglog_level for the values. + */ +#define ATH10K_DBGLOG_CFG_LOG_LVL_LSB 28 +#define ATH10K_DBGLOG_CFG_LOG_LVL_MASK 0x70000000 + +/* + * Note: this is a cleaned up version of a struct firmware uses. For + * example, config_valid was hidden inside an array. 
+ */ +struct wmi_dbglog_cfg_cmd { + /* bitmask to hold mod id config*/ + __le32 module_enable; + + /* see ATH10K_DBGLOG_CFG_ */ + __le32 config_enable; + + /* mask of module id bits to be changed */ + __le32 module_valid; + + /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */ + __le32 config_valid; +} __packed; + +struct wmi_10_4_dbglog_cfg_cmd { + /* bitmask to hold mod id config*/ + __le64 module_enable; + + /* see ATH10K_DBGLOG_CFG_ */ + __le32 config_enable; + + /* mask of module id bits to be changed */ + __le64 module_valid; + + /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */ + __le32 config_valid; +} __packed; + +enum wmi_roam_reason { + WMI_ROAM_REASON_BETTER_AP = 1, + WMI_ROAM_REASON_BEACON_MISS = 2, + WMI_ROAM_REASON_LOW_RSSI = 3, + WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4, + WMI_ROAM_REASON_HO_FAILED = 5, + + /* keep last */ + WMI_ROAM_REASON_MAX, +}; + +struct wmi_roam_ev { + __le32 vdev_id; + __le32 reason; +} __packed; + #define ATH10K_FRAGMT_THRESHOLD_MIN 540 #define ATH10K_FRAGMT_THRESHOLD_MAX 2346 #define WMI_MAX_EVENT 0x1000 /* Maximum number of pending TXed WMI packets */ -#define WMI_MAX_PENDING_TX_COUNT 128 #define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr) /* By default disable power save for IBSS */ #define ATH10K_DEFAULT_ATIM 0 +#define WMI_MAX_MEM_REQS 16 + +struct wmi_scan_ev_arg { + __le32 event_type; /* %WMI_SCAN_EVENT_ */ + __le32 reason; /* %WMI_SCAN_REASON_ */ + __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */ + __le32 scan_req_id; + __le32 scan_id; + __le32 vdev_id; +}; + +struct mgmt_tx_compl_params { + u32 desc_id; + u32 status; + u32 ppdu_id; + int ack_rssi; +}; + +struct wmi_tlv_mgmt_tx_compl_ev_arg { + __le32 desc_id; + __le32 status; + __le32 pdev_id; + __le32 ppdu_id; + __le32 ack_rssi; +}; + +struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg { + __le32 num_reports; + const __le32 *desc_ids; + const __le32 *status; + const __le32 *ppdu_ids; + const __le32 *ack_rssi; +}; + +struct wmi_peer_delete_resp_ev_arg { + __le32 vdev_id; + struct wmi_mac_addr peer_addr; +}; + +#define WMI_MGMT_RX_NUM_RSSI 4 +struct wmi_mgmt_rx_ev_arg { + __le32 channel; + __le32 snr; + __le32 rate; + __le32 phy_mode; + __le32 buf_len; + __le32 status; /* %WMI_RX_STATUS_ */ + struct wmi_mgmt_rx_ext_info ext_info; + __le32 rssi[WMI_MGMT_RX_NUM_RSSI]; +}; + +struct wmi_ch_info_ev_arg { + __le32 err_code; + __le32 freq; + __le32 cmd_flags; + __le32 noise_floor; + __le32 rx_clear_count; + __le32 cycle_count; + __le32 chan_tx_pwr_range; + __le32 chan_tx_pwr_tp; + __le32 rx_frame_count; + __le32 my_bss_rx_cycle_count; + __le32 rx_11b_mode_data_duration; + __le32 tx_frame_cnt; + __le32 mac_clk_mhz; +}; + +/* From 10.4 firmware, not sure all have the same values. 
*/ +enum wmi_vdev_start_status { + WMI_VDEV_START_OK = 0, + WMI_VDEV_START_CHAN_INVALID, +}; + +struct wmi_vdev_start_ev_arg { + __le32 vdev_id; + __le32 req_id; + __le32 resp_type; /* %WMI_VDEV_RESP_ */ + __le32 status; /* See wmi_vdev_start_status enum above */ +}; + +struct wmi_peer_kick_ev_arg { + const u8 *mac_addr; +}; + +struct wmi_swba_ev_arg { + __le32 vdev_map; + struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV]; + const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV]; +}; + +struct wmi_phyerr_ev_arg { + u32 tsf_timestamp; + u16 freq1; + u16 freq2; + u8 rssi_combined; + u8 chan_width_mhz; + u8 phy_err_code; + u16 nf_chains[4]; + u32 buf_len; + const u8 *buf; + u8 hdr_len; +}; + +struct wmi_phyerr_hdr_arg { + u32 num_phyerrs; + u32 tsf_l32; + u32 tsf_u32; + u32 buf_len; + const void *phyerrs; +}; + +struct wmi_dfs_status_ev_arg { + u32 status; +}; + +struct wmi_svc_rdy_ev_arg { + __le32 min_tx_power; + __le32 max_tx_power; + __le32 ht_cap; + __le32 vht_cap; + __le32 vht_supp_mcs; + __le32 sw_ver0; + __le32 sw_ver1; + __le32 fw_build; + __le32 phy_capab; + __le32 num_rf_chains; + __le32 eeprom_rd; + __le32 num_mem_reqs; + __le32 low_2ghz_chan; + __le32 high_2ghz_chan; + __le32 low_5ghz_chan; + __le32 high_5ghz_chan; + __le32 sys_cap_info; + const __le32 *service_map; + size_t service_map_len; + const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS]; +}; + +struct wmi_svc_avail_ev_arg { + bool service_map_ext_valid; + __le32 service_map_ext_len; + const __le32 *service_map_ext; +}; + +struct wmi_rdy_ev_arg { + __le32 sw_version; + __le32 abi_version; + __le32 status; + const u8 *mac_addr; +}; + +struct wmi_roam_ev_arg { + __le32 vdev_id; + __le32 reason; + __le32 rssi; +}; + +struct wmi_echo_ev_arg { + __le32 value; +}; + +struct wmi_pdev_temperature_event { + /* temperature value in Celsius degree */ + __le32 temperature; +} __packed; + +struct wmi_pdev_bss_chan_info_event { + __le32 freq; + __le32 noise_floor; + __le64 cycle_busy; + __le64 cycle_total; + __le64 cycle_tx; + __le64 cycle_rx; + __le64 cycle_rx_bss; + __le32 reserved; +} __packed; + +/* WOW structures */ +enum wmi_wow_wakeup_event { + WOW_BMISS_EVENT = 0, + WOW_BETTER_AP_EVENT, + WOW_DEAUTH_RECVD_EVENT, + WOW_MAGIC_PKT_RECVD_EVENT, + WOW_GTK_ERR_EVENT, + WOW_FOURWAY_HSHAKE_EVENT, + WOW_EAPOL_RECVD_EVENT, + WOW_NLO_DETECTED_EVENT, + WOW_DISASSOC_RECVD_EVENT, + WOW_PATTERN_MATCH_EVENT, + WOW_CSA_IE_EVENT, + WOW_PROBE_REQ_WPS_IE_EVENT, + WOW_AUTH_REQ_EVENT, + WOW_ASSOC_REQ_EVENT, + WOW_HTT_EVENT, + WOW_RA_MATCH_EVENT, + WOW_HOST_AUTO_SHUTDOWN_EVENT, + WOW_IOAC_MAGIC_EVENT, + WOW_IOAC_SHORT_EVENT, + WOW_IOAC_EXTEND_EVENT, + WOW_IOAC_TIMER_EVENT, + WOW_DFS_PHYERR_RADAR_EVENT, + WOW_BEACON_EVENT, + WOW_CLIENT_KICKOUT_EVENT, + WOW_EVENT_MAX, +}; + +#define C2S(x) case x: return #x + +static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev) +{ + switch (ev) { + C2S(WOW_BMISS_EVENT); + C2S(WOW_BETTER_AP_EVENT); + C2S(WOW_DEAUTH_RECVD_EVENT); + C2S(WOW_MAGIC_PKT_RECVD_EVENT); + C2S(WOW_GTK_ERR_EVENT); + C2S(WOW_FOURWAY_HSHAKE_EVENT); + C2S(WOW_EAPOL_RECVD_EVENT); + C2S(WOW_NLO_DETECTED_EVENT); + C2S(WOW_DISASSOC_RECVD_EVENT); + C2S(WOW_PATTERN_MATCH_EVENT); + C2S(WOW_CSA_IE_EVENT); + C2S(WOW_PROBE_REQ_WPS_IE_EVENT); + C2S(WOW_AUTH_REQ_EVENT); + C2S(WOW_ASSOC_REQ_EVENT); + C2S(WOW_HTT_EVENT); + C2S(WOW_RA_MATCH_EVENT); + C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT); + C2S(WOW_IOAC_MAGIC_EVENT); + C2S(WOW_IOAC_SHORT_EVENT); + C2S(WOW_IOAC_EXTEND_EVENT); + C2S(WOW_IOAC_TIMER_EVENT); + C2S(WOW_DFS_PHYERR_RADAR_EVENT); + 
C2S(WOW_BEACON_EVENT); + C2S(WOW_CLIENT_KICKOUT_EVENT); + C2S(WOW_EVENT_MAX); + default: + return NULL; + } +} + +enum wmi_wow_wake_reason { + WOW_REASON_UNSPECIFIED = -1, + WOW_REASON_NLOD = 0, + WOW_REASON_AP_ASSOC_LOST, + WOW_REASON_LOW_RSSI, + WOW_REASON_DEAUTH_RECVD, + WOW_REASON_DISASSOC_RECVD, + WOW_REASON_GTK_HS_ERR, + WOW_REASON_EAP_REQ, + WOW_REASON_FOURWAY_HS_RECV, + WOW_REASON_TIMER_INTR_RECV, + WOW_REASON_PATTERN_MATCH_FOUND, + WOW_REASON_RECV_MAGIC_PATTERN, + WOW_REASON_P2P_DISC, + WOW_REASON_WLAN_HB, + WOW_REASON_CSA_EVENT, + WOW_REASON_PROBE_REQ_WPS_IE_RECV, + WOW_REASON_AUTH_REQ_RECV, + WOW_REASON_ASSOC_REQ_RECV, + WOW_REASON_HTT_EVENT, + WOW_REASON_RA_MATCH, + WOW_REASON_HOST_AUTO_SHUTDOWN, + WOW_REASON_IOAC_MAGIC_EVENT, + WOW_REASON_IOAC_SHORT_EVENT, + WOW_REASON_IOAC_EXTEND_EVENT, + WOW_REASON_IOAC_TIMER_EVENT, + WOW_REASON_ROAM_HO, + WOW_REASON_DFS_PHYERR_RADADR_EVENT, + WOW_REASON_BEACON_RECV, + WOW_REASON_CLIENT_KICKOUT_EVENT, + WOW_REASON_DEBUG_TEST = 0xFF, +}; + +static inline const char *wow_reason(enum wmi_wow_wake_reason reason) +{ + switch (reason) { + C2S(WOW_REASON_UNSPECIFIED); + C2S(WOW_REASON_NLOD); + C2S(WOW_REASON_AP_ASSOC_LOST); + C2S(WOW_REASON_LOW_RSSI); + C2S(WOW_REASON_DEAUTH_RECVD); + C2S(WOW_REASON_DISASSOC_RECVD); + C2S(WOW_REASON_GTK_HS_ERR); + C2S(WOW_REASON_EAP_REQ); + C2S(WOW_REASON_FOURWAY_HS_RECV); + C2S(WOW_REASON_TIMER_INTR_RECV); + C2S(WOW_REASON_PATTERN_MATCH_FOUND); + C2S(WOW_REASON_RECV_MAGIC_PATTERN); + C2S(WOW_REASON_P2P_DISC); + C2S(WOW_REASON_WLAN_HB); + C2S(WOW_REASON_CSA_EVENT); + C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV); + C2S(WOW_REASON_AUTH_REQ_RECV); + C2S(WOW_REASON_ASSOC_REQ_RECV); + C2S(WOW_REASON_HTT_EVENT); + C2S(WOW_REASON_RA_MATCH); + C2S(WOW_REASON_HOST_AUTO_SHUTDOWN); + C2S(WOW_REASON_IOAC_MAGIC_EVENT); + C2S(WOW_REASON_IOAC_SHORT_EVENT); + C2S(WOW_REASON_IOAC_EXTEND_EVENT); + C2S(WOW_REASON_IOAC_TIMER_EVENT); + C2S(WOW_REASON_ROAM_HO); + C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT); + C2S(WOW_REASON_BEACON_RECV); + C2S(WOW_REASON_CLIENT_KICKOUT_EVENT); + C2S(WOW_REASON_DEBUG_TEST); + default: + return NULL; + } +} + +#undef C2S + +struct wmi_wow_ev_arg { + u32 vdev_id; + u32 flag; + enum wmi_wow_wake_reason wake_reason; + u32 data_len; +}; + +#define WOW_MIN_PATTERN_SIZE 1 +#define WOW_MAX_PATTERN_SIZE 148 +#define WOW_MAX_PKT_OFFSET 128 +#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \ + sizeof(struct rfc1042_hdr)) +#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \ + offsetof(struct ieee80211_hdr_3addr, addr1)) + +enum wmi_tdls_state { + WMI_TDLS_DISABLE, + WMI_TDLS_ENABLE_PASSIVE, + WMI_TDLS_ENABLE_ACTIVE, + WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL, +}; + +enum wmi_tdls_peer_state { + WMI_TDLS_PEER_STATE_PEERING, + WMI_TDLS_PEER_STATE_CONNECTED, + WMI_TDLS_PEER_STATE_TEARDOWN, +}; + +struct wmi_tdls_peer_update_cmd_arg { + u32 vdev_id; + enum wmi_tdls_peer_state peer_state; + u8 addr[ETH_ALEN]; +}; + +#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32 + +#define WMI_TDLS_PEER_SP_MASK 0x60 +#define WMI_TDLS_PEER_SP_LSB 5 + +enum wmi_tdls_options { + WMI_TDLS_OFFCHAN_EN = BIT(0), + WMI_TDLS_BUFFER_STA_EN = BIT(1), + WMI_TDLS_SLEEP_STA_EN = BIT(2), +}; + +enum { + WMI_TDLS_PEER_QOS_AC_VO = BIT(0), + WMI_TDLS_PEER_QOS_AC_VI = BIT(1), + WMI_TDLS_PEER_QOS_AC_BK = BIT(2), + WMI_TDLS_PEER_QOS_AC_BE = BIT(3), +}; + +struct wmi_tdls_peer_capab_arg { + u8 peer_uapsd_queues; + u8 peer_max_sp; + u32 buff_sta_support; + u32 off_chan_support; + u32 peer_curr_operclass; + u32 self_curr_operclass; + u32 peer_chan_len; + 
u32 peer_operclass_len; + u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES]; + u32 is_peer_responder; + u32 pref_offchan_num; + u32 pref_offchan_bw; +}; + +struct wmi_10_4_tdls_set_state_cmd { + __le32 vdev_id; + __le32 state; + __le32 notification_interval_ms; + __le32 tx_discovery_threshold; + __le32 tx_teardown_threshold; + __le32 rssi_teardown_threshold; + __le32 rssi_delta; + __le32 tdls_options; + __le32 tdls_peer_traffic_ind_window; + __le32 tdls_peer_traffic_response_timeout_ms; + __le32 tdls_puapsd_mask; + __le32 tdls_puapsd_inactivity_time_ms; + __le32 tdls_puapsd_rx_frame_threshold; + __le32 teardown_notification_ms; + __le32 tdls_peer_kickout_threshold; +} __packed; + +struct wmi_tdls_peer_capabilities { + __le32 peer_qos; + __le32 buff_sta_support; + __le32 off_chan_support; + __le32 peer_curr_operclass; + __le32 self_curr_operclass; + __le32 peer_chan_len; + __le32 peer_operclass_len; + u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES]; + __le32 is_peer_responder; + __le32 pref_offchan_num; + __le32 pref_offchan_bw; + union { + /* to match legacy implementation allocate room for + * at least one record even if peer_chan_len is 0 + */ + struct wmi_channel peer_chan_min_allocation; + DECLARE_FLEX_ARRAY(struct wmi_channel, peer_chan_list); + }; +} __packed; + +struct wmi_10_4_tdls_peer_update_cmd { + __le32 vdev_id; + struct wmi_mac_addr peer_macaddr; + __le32 peer_state; + __le32 reserved[4]; + struct wmi_tdls_peer_capabilities peer_capab; +} __packed; + +enum wmi_tdls_peer_reason { + WMI_TDLS_TEARDOWN_REASON_TX, + WMI_TDLS_TEARDOWN_REASON_RSSI, + WMI_TDLS_TEARDOWN_REASON_SCAN, + WMI_TDLS_DISCONNECTED_REASON_PEER_DELETE, + WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT, + WMI_TDLS_TEARDOWN_REASON_BAD_PTR, + WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE, + WMI_TDLS_ENTER_BUF_STA, + WMI_TDLS_EXIT_BUF_STA, + WMI_TDLS_ENTER_BT_BUSY_MODE, + WMI_TDLS_EXIT_BT_BUSY_MODE, + WMI_TDLS_SCAN_STARTED_EVENT, + WMI_TDLS_SCAN_COMPLETED_EVENT, +}; + +enum wmi_tdls_peer_notification { + WMI_TDLS_SHOULD_DISCOVER, + WMI_TDLS_SHOULD_TEARDOWN, + WMI_TDLS_PEER_DISCONNECTED, + WMI_TDLS_CONNECTION_TRACKER_NOTIFICATION, +}; + +struct wmi_tdls_peer_event { + struct wmi_mac_addr peer_macaddr; + /* see enum wmi_tdls_peer_notification*/ + __le32 peer_status; + /* see enum wmi_tdls_peer_reason */ + __le32 peer_reason; + __le32 vdev_id; +} __packed; + +enum wmi_tid_aggr_control_conf { + WMI_TID_CONFIG_AGGR_CONTROL_IGNORE, + WMI_TID_CONFIG_AGGR_CONTROL_ENABLE, + WMI_TID_CONFIG_AGGR_CONTROL_DISABLE, +}; + +enum wmi_noack_tid_conf { + WMI_NOACK_TID_CONFIG_IGNORE_ACK_POLICY, + WMI_PEER_TID_CONFIG_ACK, + WMI_PEER_TID_CONFIG_NOACK, +}; + +enum wmi_tid_rate_ctrl_conf { + WMI_TID_CONFIG_RATE_CONTROL_IGNORE, + WMI_TID_CONFIG_RATE_CONTROL_AUTO, + WMI_TID_CONFIG_RATE_CONTROL_FIXED_RATE, + WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE, + WMI_PEER_TID_CONFIG_RATE_UPPER_CAP, +}; + +enum wmi_tid_rtscts_control_conf { + WMI_TID_CONFIG_RTSCTS_CONTROL_ENABLE, + WMI_TID_CONFIG_RTSCTS_CONTROL_DISABLE, +}; + +enum wmi_ext_tid_config_map { + WMI_EXT_TID_RTS_CTS_CONFIG = BIT(0), +}; + +struct wmi_per_peer_per_tid_cfg_arg { + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 tid; + enum wmi_noack_tid_conf ack_policy; + enum wmi_tid_aggr_control_conf aggr_control; + u8 rate_ctrl; + u32 retry_count; + u32 rcode_flags; + u32 ext_tid_cfg_bitmap; + u32 rtscts_ctrl; +}; + +struct wmi_peer_per_tid_cfg_cmd { + __le32 vdev_id; + struct wmi_mac_addr peer_macaddr; + __le32 tid; + + /* see enum wmi_noack_tid_conf */ + __le32 ack_policy; + + /* see enum 
wmi_tid_aggr_control_conf */ + __le32 aggr_control; + + /* see enum wmi_tid_rate_ctrl_conf */ + __le32 rate_control; + __le32 rcode_flags; + __le32 retry_count; + + /* See enum wmi_ext_tid_config_map */ + __le32 ext_tid_cfg_bitmap; + + /* see enum wmi_tid_rtscts_control_conf */ + __le32 rtscts_ctrl; +} __packed; + +enum wmi_txbf_conf { + WMI_TXBF_CONF_UNSUPPORTED, + WMI_TXBF_CONF_BEFORE_ASSOC, + WMI_TXBF_CONF_AFTER_ASSOC, +}; + +#define WMI_CCA_DETECT_LEVEL_AUTO 0 +#define WMI_CCA_DETECT_MARGIN_AUTO 0 + +struct wmi_pdev_set_adaptive_cca_params { + __le32 enable; + __le32 cca_detect_level; + __le32 cca_detect_margin; +} __packed; + +#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2 +#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200 +#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100 +#define WMI_PNO_MAX_NETW_CHANNELS 26 +#define WMI_PNO_MAX_NETW_CHANNELS_EX 60 +#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID +#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN + +/*size based of dot11 declaration without extra IEs as we will not carry those for PNO*/ +#define WMI_PNO_MAX_PB_REQ_SIZE 450 + +#define WMI_PNO_24G_DEFAULT_CH 1 +#define WMI_PNO_5G_DEFAULT_CH 36 + +#define WMI_ACTIVE_MAX_CHANNEL_TIME 40 +#define WMI_PASSIVE_MAX_CHANNEL_TIME 110 + +/* SSID broadcast type */ +enum wmi_SSID_bcast_type { + BCAST_UNKNOWN = 0, + BCAST_NORMAL = 1, + BCAST_HIDDEN = 2, +}; + +struct wmi_network_type { + struct wmi_ssid ssid; + u32 authentication; + u32 encryption; + u32 bcast_nw_type; + u8 channel_count; + u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX]; + s32 rssi_threshold; +} __packed; + +struct wmi_pno_scan_req { + u8 enable; + u8 vdev_id; + u8 uc_networks_count; + struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS]; + u32 fast_scan_period; + u32 slow_scan_period; + u8 fast_scan_max_cycles; + + bool do_passive_scan; + + u32 delay_start_time; + u32 active_min_time; + u32 active_max_time; + u32 passive_min_time; + u32 passive_max_time; + + /* mac address randomization attributes */ + u32 enable_pno_scan_randomization; + u8 mac_addr[ETH_ALEN]; + u8 mac_addr_mask[ETH_ALEN]; +} __packed; + +enum wmi_host_platform_type { + WMI_HOST_PLATFORM_HIGH_PERF, + WMI_HOST_PLATFORM_LOW_PERF, +}; + +enum wmi_bss_survey_req_type { + WMI_BSS_SURVEY_REQ_TYPE_READ = 1, + WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR, +}; + +struct wmi_pdev_chan_info_req_cmd { + __le32 type; + __le32 reserved; +} __packed; + +/* bb timing register configurations */ +struct wmi_bb_timing_cfg_arg { + /* Tx_end to pa off timing */ + u32 bb_tx_timing; + + /* Tx_end to external pa off timing */ + u32 bb_xpa_timing; +}; + +struct wmi_pdev_bb_timing_cfg_cmd { + /* Tx_end to pa off timing */ + __le32 bb_tx_timing; + + /* Tx_end to external pa off timing */ + __le32 bb_xpa_timing; +} __packed; + +struct wmi_ftm_seg_hdr { + __le32 len; + __le32 msgref; + __le32 segmentinfo; + __le32 pdev_id; +} __packed; + +struct wmi_ftm_cmd { + __le32 tlv_header; + struct wmi_ftm_seg_hdr seg_hdr; + u8 data[]; +} __packed; + +#define WMI_TLV_LEN GENMASK(15, 0) +#define WMI_TLV_TAG GENMASK(31, 16) +#define MAX_WMI_UTF_LEN 252 + struct ath10k; struct ath10k_vif; +struct ath10k_fw_stats_pdev; +struct ath10k_fw_stats_peer; +struct ath10k_fw_stats; int ath10k_wmi_attach(struct ath10k *ar); void ath10k_wmi_detach(struct ath10k *ar); +void ath10k_wmi_free_host_mem(struct ath10k *ar); int ath10k_wmi_wait_for_service_ready(struct ath10k *ar); int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar); -void ath10k_wmi_flush_tx(struct ath10k *ar); - -int 
ath10k_wmi_connect_htc_service(struct ath10k *ar); -int ath10k_wmi_pdev_set_channel(struct ath10k *ar, - const struct wmi_channel_arg *); -int ath10k_wmi_pdev_suspend_target(struct ath10k *ar); -int ath10k_wmi_pdev_resume_target(struct ath10k *ar); -int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, - u16 rd5g, u16 ctl2g, u16 ctl5g); -int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, - u32 value); -int ath10k_wmi_cmd_init(struct ath10k *ar); -int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *); -void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *); -int ath10k_wmi_stop_scan(struct ath10k *ar, - const struct wmi_stop_scan_arg *arg); -int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, - enum wmi_vdev_type type, - enum wmi_vdev_subtype subtype, - const u8 macaddr[ETH_ALEN]); -int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id); -int ath10k_wmi_vdev_start(struct ath10k *ar, - const struct wmi_vdev_start_request_arg *); -int ath10k_wmi_vdev_restart(struct ath10k *ar, - const struct wmi_vdev_start_request_arg *); -int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id); -int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, - const u8 *bssid); -int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id); -int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, - enum wmi_vdev_param param_id, u32 param_value); -int ath10k_wmi_vdev_install_key(struct ath10k *ar, - const struct wmi_vdev_install_key_arg *arg); -int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, - const u8 peer_addr[ETH_ALEN]); -int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, - const u8 peer_addr[ETH_ALEN]); -int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, - const u8 peer_addr[ETH_ALEN], u32 tid_bitmap); -int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, - const u8 *peer_addr, - enum wmi_peer_param param_id, u32 param_value); -int ath10k_wmi_peer_assoc(struct ath10k *ar, - const struct wmi_peer_assoc_complete_arg *arg); -int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, - enum wmi_sta_ps_mode psmode); -int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, - enum wmi_sta_powersave_param param_id, - u32 value); -int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, - enum wmi_ap_ps_peer_param param_id, u32 value); -int ath10k_wmi_scan_chan_list(struct ath10k *ar, - const struct wmi_scan_chan_list_arg *arg); -int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg); -int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, - const struct wmi_pdev_set_wmm_params_arg *arg); -int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id); + +struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len); +int ath10k_wmi_connect(struct ath10k *ar); + +int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); +int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, + u32 cmd_id); +void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *arg); + +void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src, + struct ath10k_fw_stats_pdev *dst); +void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src, + struct ath10k_fw_stats_pdev *dst); +void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src, + struct ath10k_fw_stats_pdev *dst); +void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra 
*src, + struct ath10k_fw_stats_pdev *dst); +void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, + struct ath10k_fw_stats_peer *dst); +void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar, + struct wmi_host_mem_chunks *chunks); +void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn, + const struct wmi_start_scan_arg *arg); +void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params, + const struct wmi_wmm_params_arg *arg); +void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch, + const struct wmi_channel_arg *arg); +int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg); + +int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb); +int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb); +int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb); +int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb); +int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_dfs(struct ath10k *ar, + struct wmi_phyerr_ev_arg *phyerr, u64 tsf); +void ath10k_wmi_event_spectral_scan(struct ath10k *ar, + struct wmi_phyerr_ev_arg *phyerr, + u64 tsf); +void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, + struct sk_buff *skb); +void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, + struct sk_buff *skb); +void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, + struct sk_buff *skb); +void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, + struct sk_buff *skb); +void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb); +void 
ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb); +int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb); +int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf, + int left_len, struct wmi_phyerr_ev_arg *arg); +void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf); +void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf); +void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf); +int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype); +int ath10k_wmi_barrier(struct ath10k *ar); +void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table, + u32 num_tx_chain); +void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb); #endif /* _WMI_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c new file mode 100644 index 000000000000..aa7b2e703f3d --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -0,0 +1,642 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2015-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "mac.h" + +#include <net/mac80211.h> +#include "hif.h" +#include "core.h" +#include "debug.h" +#include "wmi.h" +#include "wmi-ops.h" + +static const struct wiphy_wowlan_support ath10k_wowlan_support = { + .flags = WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_MAGIC_PKT, + .pattern_min_len = WOW_MIN_PATTERN_SIZE, + .pattern_max_len = WOW_MAX_PATTERN_SIZE, + .max_pkt_offset = WOW_MAX_PKT_OFFSET, +}; + +static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + int i, ret; + + for (i = 0; i < WOW_EVENT_MAX; i++) { + ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0); + if (ret) { + ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n", + wow_wakeup_event(i), arvif->vdev_id, ret); + return ret; + } + } + + for (i = 0; i < ar->wow.max_num_patterns; i++) { + ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i); + if (ret) { + ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n", + i, arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath10k_wow_cleanup(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath10k_wow_vif_cleanup(arvif); + if (ret) { + ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +/* + * Convert a 802.3 format to a 802.11 format. + * +------------+-----------+--------+----------------+ + * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... | + * +------------+-----------+--------+----------------+ + * |__ |_______ |____________ |________ + * | | | | + * +--+------------+----+-----------+---------------+-----------+ + * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... 
| + * +--+------------+----+-----------+---------------+-----------+ + */ +static void ath10k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new, + const struct cfg80211_pkt_pattern *old) +{ + u8 hdr_8023_pattern[ETH_HLEN] = {}; + u8 hdr_8023_bit_mask[ETH_HLEN] = {}; + u8 hdr_80211_pattern[WOW_HDR_LEN] = {}; + u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {}; + + int total_len = old->pkt_offset + old->pattern_len; + int hdr_80211_end_offset; + + struct ieee80211_hdr_3addr *new_hdr_pattern = + (struct ieee80211_hdr_3addr *)hdr_80211_pattern; + struct ieee80211_hdr_3addr *new_hdr_mask = + (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask; + struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern; + struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask; + int hdr_len = sizeof(*new_hdr_pattern); + + struct rfc1042_hdr *new_rfc_pattern = + (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len); + struct rfc1042_hdr *new_rfc_mask = + (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len); + int rfc_len = sizeof(*new_rfc_pattern); + + memcpy(hdr_8023_pattern + old->pkt_offset, + old->pattern, ETH_HLEN - old->pkt_offset); + memcpy(hdr_8023_bit_mask + old->pkt_offset, + old->mask, ETH_HLEN - old->pkt_offset); + + /* Copy destination address */ + memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN); + memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN); + + /* Copy source address */ + memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN); + memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN); + + /* Copy logic link type */ + memcpy(&new_rfc_pattern->snap_type, + &old_hdr_pattern->h_proto, + sizeof(old_hdr_pattern->h_proto)); + memcpy(&new_rfc_mask->snap_type, + &old_hdr_mask->h_proto, + sizeof(old_hdr_mask->h_proto)); + + /* Calculate new pkt_offset */ + if (old->pkt_offset < ETH_ALEN) + new->pkt_offset = old->pkt_offset + + offsetof(struct ieee80211_hdr_3addr, addr1); + else if (old->pkt_offset < offsetof(struct ethhdr, h_proto)) + new->pkt_offset = old->pkt_offset + + offsetof(struct ieee80211_hdr_3addr, addr3) - + offsetof(struct ethhdr, h_source); + else + new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN; + + /* Calculate new hdr end offset */ + if (total_len > ETH_HLEN) + hdr_80211_end_offset = hdr_len + rfc_len; + else if (total_len > offsetof(struct ethhdr, h_proto)) + hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN; + else if (total_len > ETH_ALEN) + hdr_80211_end_offset = total_len - ETH_ALEN + + offsetof(struct ieee80211_hdr_3addr, addr3); + else + hdr_80211_end_offset = total_len + + offsetof(struct ieee80211_hdr_3addr, addr1); + + new->pattern_len = hdr_80211_end_offset - new->pkt_offset; + + memcpy((u8 *)new->pattern, + hdr_80211_pattern + new->pkt_offset, + new->pattern_len); + memcpy((u8 *)new->mask, + hdr_80211_bit_mask + new->pkt_offset, + new->pattern_len); + + if (total_len > ETH_HLEN) { + /* Copy frame body */ + memcpy((u8 *)new->pattern + new->pattern_len, + (void *)old->pattern + ETH_HLEN - old->pkt_offset, + total_len - ETH_HLEN); + memcpy((u8 *)new->mask + new->pattern_len, + (void *)old->mask + ETH_HLEN - old->pkt_offset, + total_len - ETH_HLEN); + + new->pattern_len += total_len - ETH_HLEN; + } +} + +static int ath10k_wmi_pno_check(struct ath10k *ar, u32 vdev_id, + struct cfg80211_sched_scan_request *nd_config, + struct wmi_pno_scan_req *pno) +{ + int i, j, ret = 0; + u8 ssid_len; + + pno->enable = 1; + pno->vdev_id = vdev_id; + pno->uc_networks_count = nd_config->n_match_sets; + + 
if (!pno->uc_networks_count || + pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS) + return -EINVAL; + + if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX) + return -EINVAL; + + /* Filling per profile params */ + for (i = 0; i < pno->uc_networks_count; i++) { + ssid_len = nd_config->match_sets[i].ssid.ssid_len; + + if (ssid_len == 0 || ssid_len > 32) + return -EINVAL; + + pno->a_networks[i].ssid.ssid_len = __cpu_to_le32(ssid_len); + + memcpy(pno->a_networks[i].ssid.ssid, + nd_config->match_sets[i].ssid.ssid, + nd_config->match_sets[i].ssid.ssid_len); + pno->a_networks[i].authentication = 0; + pno->a_networks[i].encryption = 0; + pno->a_networks[i].bcast_nw_type = 0; + + /*Copying list of valid channel into request */ + pno->a_networks[i].channel_count = nd_config->n_channels; + pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold; + + for (j = 0; j < nd_config->n_channels; j++) { + pno->a_networks[i].channels[j] = + nd_config->channels[j]->center_freq; + } + } + + /* set scan to passive if no SSIDs are specified in the request */ + if (nd_config->n_ssids == 0) + pno->do_passive_scan = true; + else + pno->do_passive_scan = false; + + for (i = 0; i < nd_config->n_ssids; i++) { + j = 0; + while (j < pno->uc_networks_count) { + if (__le32_to_cpu(pno->a_networks[j].ssid.ssid_len) == + nd_config->ssids[i].ssid_len && + (memcmp(pno->a_networks[j].ssid.ssid, + nd_config->ssids[i].ssid, + __le32_to_cpu(pno->a_networks[j].ssid.ssid_len)) == 0)) { + pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN; + break; + } + j++; + } + } + + if (nd_config->n_scan_plans == 2) { + pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations; + pno->slow_scan_period = + nd_config->scan_plans[1].interval * MSEC_PER_SEC; + } else if (nd_config->n_scan_plans == 1) { + pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + pno->fast_scan_max_cycles = 1; + pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + } else { + ath10k_warn(ar, "Invalid number of scan plans %d !!", + nd_config->n_scan_plans); + } + + if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + /* enable mac randomization */ + pno->enable_pno_scan_randomization = 1; + memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN); + memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN); + } + + pno->delay_start_time = nd_config->delay; + + /* Current FW does not support min-max range for dwell time */ + pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME; + pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME; + return ret; +} + +static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, + struct cfg80211_wowlan *wowlan) +{ + int ret, i; + unsigned long wow_mask = 0; + struct ath10k *ar = arvif->ar; + const struct cfg80211_pkt_pattern *patterns = wowlan->patterns; + int pattern_id = 0; + + /* Setup requested WOW features */ + switch (arvif->vdev_type) { + case WMI_VDEV_TYPE_IBSS: + __set_bit(WOW_BEACON_EVENT, &wow_mask); + fallthrough; + case WMI_VDEV_TYPE_AP: + __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask); + __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask); + __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask); + __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask); + __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask); + __set_bit(WOW_HTT_EVENT, &wow_mask); + __set_bit(WOW_RA_MATCH_EVENT, &wow_mask); + break; + case WMI_VDEV_TYPE_STA: + if (wowlan->disconnect) { + __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask); + 
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask); + __set_bit(WOW_BMISS_EVENT, &wow_mask); + __set_bit(WOW_CSA_IE_EVENT, &wow_mask); + } + + if (wowlan->magic_pkt) + __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask); + + if (wowlan->nd_config) { + struct wmi_pno_scan_req *pno; + int ret; + + pno = kzalloc(sizeof(*pno), GFP_KERNEL); + if (!pno) + return -ENOMEM; + + ar->nlo_enabled = true; + + ret = ath10k_wmi_pno_check(ar, arvif->vdev_id, + wowlan->nd_config, pno); + if (!ret) { + ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno); + __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask); + } + + kfree(pno); + } + break; + default: + break; + } + + for (i = 0; i < wowlan->n_patterns; i++) { + u8 bitmask[WOW_MAX_PATTERN_SIZE] = {}; + u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {}; + u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {}; + struct cfg80211_pkt_pattern new_pattern = {}; + struct cfg80211_pkt_pattern old_pattern = patterns[i]; + int j; + + new_pattern.pattern = ath_pattern; + new_pattern.mask = ath_bitmask; + if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE) + continue; + /* convert bytemask to bitmask */ + for (j = 0; j < patterns[i].pattern_len; j++) + if (patterns[i].mask[j / 8] & BIT(j % 8)) + bitmask[j] = 0xff; + old_pattern.mask = bitmask; + + if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) { + if (patterns[i].pkt_offset < ETH_HLEN) { + ath10k_wow_convert_8023_to_80211(&new_pattern, + &old_pattern); + } else { + new_pattern = old_pattern; + new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN; + } + } + + if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE)) + return -EINVAL; + + ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id, + pattern_id, + new_pattern.pattern, + new_pattern.mask, + new_pattern.pattern_len, + new_pattern.pkt_offset); + if (ret) { + ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n", + pattern_id, + arvif->vdev_id, ret); + return ret; + } + + pattern_id++; + __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask); + } + + for (i = 0; i < WOW_EVENT_MAX; i++) { + if (!test_bit(i, &wow_mask)) + continue; + ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1); + if (ret) { + ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n", + wow_wakeup_event(i), arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath10k_wow_set_wakeups(struct ath10k *ar, + struct cfg80211_wowlan *wowlan) +{ + struct ath10k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath10k_vif_wow_set_wakeups(arvif, wowlan); + if (ret) { + ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath10k_vif_wow_clean_nlo(struct ath10k_vif *arvif) +{ + int ret = 0; + struct ath10k *ar = arvif->ar; + + switch (arvif->vdev_type) { + case WMI_VDEV_TYPE_STA: + if (ar->nlo_enabled) { + struct wmi_pno_scan_req *pno; + + pno = kzalloc(sizeof(*pno), GFP_KERNEL); + if (!pno) + return -ENOMEM; + + pno->enable = 0; + ar->nlo_enabled = false; + ret = ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno); + kfree(pno); + } + break; + default: + break; + } + return ret; +} + +static int ath10k_wow_nlo_cleanup(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath10k_vif_wow_clean_nlo(arvif); + if (ret) { + ath10k_warn(ar, "failed to clean nlo settings on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + 
} + + return 0; +} + +static int ath10k_wow_enable(struct ath10k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->target_suspend); + + ret = ath10k_wmi_wow_enable(ar); + if (ret) { + ath10k_warn(ar, "failed to issue wow enable: %d\n", ret); + return ret; + } + + ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ); + if (ret == 0) { + ath10k_warn(ar, "timed out while waiting for suspend completion\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int ath10k_wow_wakeup(struct ath10k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->wow.wakeup_completed); + + ret = ath10k_wmi_wow_host_wakeup_ind(ar); + if (ret) { + ath10k_warn(ar, "failed to send wow wakeup indication: %d\n", + ret); + return ret; + } + + ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ); + if (ret == 0) { + ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n"); + return -ETIMEDOUT; + } + + return 0; +} + +int ath10k_wow_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct ath10k *ar = hw->priv; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features))) { + ret = 1; + goto exit; + } + + ret = ath10k_wow_cleanup(ar); + if (ret) { + ath10k_warn(ar, "failed to clear wow wakeup events: %d\n", + ret); + goto exit; + } + + ret = ath10k_wow_set_wakeups(ar, wowlan); + if (ret) { + ath10k_warn(ar, "failed to set wow wakeup events: %d\n", + ret); + goto cleanup; + } + + ath10k_mac_wait_tx_complete(ar); + + ret = ath10k_wow_enable(ar); + if (ret) { + ath10k_warn(ar, "failed to start wow: %d\n", ret); + goto cleanup; + } + + ret = ath10k_hif_suspend(ar); + if (ret) { + ath10k_warn(ar, "failed to suspend hif: %d\n", ret); + goto wakeup; + } + + goto exit; + +wakeup: + ath10k_wow_wakeup(ar); + +cleanup: + ath10k_wow_cleanup(ar); + +exit: + mutex_unlock(&ar->conf_mutex); + return ret ? 
1 : 0; +} + +void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct ath10k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features)) { + device_set_wakeup_enable(ar->dev, enabled); + } + mutex_unlock(&ar->conf_mutex); +} + +int ath10k_wow_op_resume(struct ieee80211_hw *hw) +{ + struct ath10k *ar = hw->priv; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features))) { + ret = 1; + goto exit; + } + + ret = ath10k_hif_resume(ar); + if (ret) { + ath10k_warn(ar, "failed to resume hif: %d\n", ret); + goto exit; + } + + ret = ath10k_wow_wakeup(ar); + if (ret) + ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret); + + ret = ath10k_wow_nlo_cleanup(ar); + if (ret) + ath10k_warn(ar, "failed to cleanup nlo: %d\n", ret); + +exit: + if (ret) { + switch (ar->state) { + case ATH10K_STATE_ON: + ar->state = ATH10K_STATE_RESTARTING; + ret = 1; + break; + case ATH10K_STATE_OFF: + case ATH10K_STATE_RESTARTING: + case ATH10K_STATE_RESTARTED: + case ATH10K_STATE_UTF: + case ATH10K_STATE_WEDGED: + ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n", + ar->state); + ret = -EIO; + break; + } + } + + mutex_unlock(&ar->conf_mutex); + return ret; +} + +int ath10k_wow_init(struct ath10k *ar) +{ + if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features)) + return 0; + + if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map))) + return -EINVAL; + + ar->wow.wowlan_support = ath10k_wowlan_support; + + if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) { + ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE; + ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE; + } + + if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) { + ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT; + ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + } + + ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; + ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; + + device_set_wakeup_capable(ar->dev, true); + + return 0; +} diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h new file mode 100644 index 000000000000..14ea4e1e925e --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wow.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2015,2017 Qualcomm Atheros, Inc. + */ +#ifndef _WOW_H_ +#define _WOW_H_ + +struct ath10k_wow { + u32 max_num_patterns; + struct completion wakeup_completed; + struct wiphy_wowlan_support wowlan_support; +}; + +#ifdef CONFIG_PM + +int ath10k_wow_init(struct ath10k *ar); +int ath10k_wow_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan); +int ath10k_wow_op_resume(struct ieee80211_hw *hw); +void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled); + +#else + +static inline int ath10k_wow_init(struct ath10k *ar) +{ + return 0; +} + +#endif /* CONFIG_PM */ +#endif /* _WOW_H_ */ |
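
Note on the spectral scan configuration in wmi.h above: the FFT bin count follows from scan_fft_size and scan_bin_scale as 2^(fft_size - bin_scale), so the defaults (7 and 1) yield 64 reported bins per FFT. Below is a minimal sketch of how a caller could load struct wmi_vdev_spectral_conf_arg with the WMI_SPECTRAL_*_DEFAULT values; the helper name is hypothetical and not part of this patch.

/* Hypothetical helper (not part of this patch): load a spectral scan
 * request with the WMI_SPECTRAL_*_DEFAULT values. Note that
 * scan_noise_floor_ref is a negative dBm value carried in a u32.
 */
static void spectral_conf_fill_defaults(struct wmi_vdev_spectral_conf_arg *arg,
                                        u32 vdev_id)
{
        arg->vdev_id = vdev_id;
        arg->scan_count = WMI_SPECTRAL_COUNT_DEFAULT;       /* 0 = run forever */
        arg->scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
        arg->scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
        arg->scan_fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT; /* 2^(7 - 1) = 64 bins */
        arg->scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT;
        arg->scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT;
        arg->scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
        arg->scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT;
        arg->scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
        arg->scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
        arg->scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
        arg->scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
        arg->scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT;
        arg->scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
        arg->scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT; /* summary + in-band bins */
        arg->scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
        arg->scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT;
        arg->scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT;
}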
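
Note on the reworked WMI_UAPSD_AC_BIT_MASK() macro: delivery bits land on even positions (2 * ac) and trigger bits on odd positions (2 * ac + 1), which is exactly the layout of enum wmi_sta_ps_param_uapsd. A self-contained host-side check of that mapping, assuming only the constants defined in this header:

#include <stdio.h>

#define WMI_UAPSD_AC_TYPE_DELI 0
#define WMI_UAPSD_AC_TYPE_TRIG 1

#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
        (type == WMI_UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1))

int main(void)
{
        int ac;

        /* AC0..AC3: delivery lands on bits 0,2,4,6 and trigger on bits
         * 1,3,5,7, matching WMI_STA_PS_UAPSD_AC{0..3}_{DELIVERY,TRIGGER}_EN.
         */
        for (ac = 0; ac < 4; ac++)
                printf("AC%d: deli=0x%02x trig=0x%02x\n", ac,
                       WMI_UAPSD_AC_BIT_MASK(ac, WMI_UAPSD_AC_TYPE_DELI),
                       WMI_UAPSD_AC_BIT_MASK(ac, WMI_UAPSD_AC_TYPE_TRIG));
        return 0;
}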
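
Note on struct wmi_peer_flags_map: it decouples the driver from the per-ABI bit assignments (enum wmi_peer_flags, wmi_10x_peer_flags and wmi_10_2_peer_flags carry different values for the same semantic flag). The tables that actually fill it live in wmi.c and are not part of this header hunk; an illustrative initializer for the 10.2 layout, using only the enum values shown above, could look like this:

/* Illustrative table (the real ones live in wmi.c): project the 10.2
 * firmware bit positions onto the version-neutral wmi_peer_flags_map
 * that the rest of the driver consumes.
 */
static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map_example = {
        .auth = WMI_10_2_PEER_AUTH,
        .qos = WMI_10_2_PEER_QOS,
        .need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
        .need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
        .apsd = WMI_10_2_PEER_APSD,
        .ht = WMI_10_2_PEER_HT,
        .bw40 = WMI_10_2_PEER_40MHZ,
        .stbc = WMI_10_2_PEER_STBC,
        .ldbc = WMI_10_2_PEER_LDPC,
        .dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
        .static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
        .spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
        .vht = WMI_10_2_PEER_VHT,
        .bw80 = WMI_10_2_PEER_80MHZ,
        .vht_2g = WMI_10_2_PEER_VHT_2G,
        .pmf = WMI_10_2_PEER_PMF,
        .bw160 = WMI_10_2_PEER_160MHZ,
};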
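
Note on the ATH10K_DBGLOG_CFG_* definitions: each _LSB/_MASK pair packs one sub-field of a 32-bit config word (VAP log mask in bits 0-15, reporting enable in bit 16, timestamp resolution in bits 17-19, report size in bits 20-27, minimum log level in bits 28-30). A small standalone sketch of the packing arithmetic; the DBGLOG_SET() helper is hypothetical, only the constants come from the header:

#include <stdint.h>

/* Constants reproduced from wmi.h above */
#define ATH10K_DBGLOG_CFG_VAP_LOG_LSB           0
#define ATH10K_DBGLOG_CFG_VAP_LOG_MASK          0x0000ffff
#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB  16
#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK 0x00010000
#define ATH10K_DBGLOG_CFG_LOG_LVL_LSB           28
#define ATH10K_DBGLOG_CFG_LOG_LVL_MASK          0x70000000

/* Pack a value into its field: the usual (val << lsb) & mask idiom */
#define DBGLOG_SET(val, field) \
        (((uint32_t)(val) << ATH10K_DBGLOG_CFG_##field##_LSB) & \
         ATH10K_DBGLOG_CFG_##field##_MASK)

/* Example: enable reporting on all VAPs at WARN level (2), i.e.
 * 0x0000ffff | 0x00010000 | 0x20000000 == 0x2001ffff
 */
static inline uint32_t dbglog_cfg_example(void)
{
        return DBGLOG_SET(0xffff, VAP_LOG) |
               DBGLOG_SET(1, REPORTING_ENABLE) |
               DBGLOG_SET(2, LOG_LVL); /* ATH10K_DBGLOG_LEVEL_WARN */
}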
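
Note on ath10k_wow_convert_8023_to_80211() in wow.c: the new pkt_offset depends on where the old offset falls in the 802.3 header, because the destination MAC moves to addr1 (offset 4), the source MAC to addr3 (offset 16), and the ethertype into the RFC 1042 SNAP header behind the 24-byte 3-address 802.11 header. Below is a standalone rendering of just that offset arithmetic, with the structure offsets written out as plain constants rather than offsetof() on the kernel types:

#include <stdio.h>

#define ETH_ALEN        6
#define ETH_HLEN        14
#define ETH_PROTO_OFF   12      /* offsetof(struct ethhdr, h_proto) */
#define HDR80211_LEN    24      /* sizeof(struct ieee80211_hdr_3addr) */
#define A1_OFF          4       /* offsetof(struct ieee80211_hdr_3addr, addr1) */
#define A3_OFF          16      /* offsetof(struct ieee80211_hdr_3addr, addr3) */
#define RFC1042_LEN     8       /* sizeof(struct rfc1042_hdr) */

static int wow_new_pkt_offset(int old_off)
{
        if (old_off < ETH_ALEN)         /* inside the destination MAC */
                return old_off + A1_OFF;
        if (old_off < ETH_PROTO_OFF)    /* inside the source MAC */
                return old_off + A3_OFF - ETH_ALEN;
        /* ethertype or body: skip the full 24 + 8 byte 802.11 + SNAP header */
        return old_off + HDR80211_LEN + RFC1042_LEN - ETH_HLEN;
}

int main(void)
{
        /* 0 -> 4 (addr1), 6 -> 16 (addr3), 12 -> 30 (snap_type) */
        printf("%d %d %d\n", wow_new_pkt_offset(0),
               wow_new_pkt_offset(6), wow_new_pkt_offset(12));
        return 0;
}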
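
Note on the sched-scan plan handling in ath10k_wmi_pno_check(): with two plans the first becomes the fast-scan phase (period plus iteration count) and the second the slow-scan period; with a single plan both phases use the same interval and the fast phase runs exactly one cycle. Intervals arrive in seconds and the firmware expects milliseconds. A compact sketch of that mapping, with a simplified plan type standing in for cfg80211_sched_scan_plan:

#define MSEC_PER_SEC 1000U

struct plan { unsigned int interval; unsigned int iterations; };

/* Mirrors the n_scan_plans == 2 / == 1 branches above; any other count
 * is rejected with a warning in the driver.
 */
static void pno_map_plans(const struct plan *plans, int n,
                          unsigned int *fast_ms, unsigned int *fast_cycles,
                          unsigned int *slow_ms)
{
        if (n == 2) {
                *fast_ms = plans[0].interval * MSEC_PER_SEC;
                *fast_cycles = plans[0].iterations;
                *slow_ms = plans[1].interval * MSEC_PER_SEC;
        } else if (n == 1) {
                *fast_ms = plans[0].interval * MSEC_PER_SEC;
                *fast_cycles = 1;
                *slow_ms = *fast_ms;
        }
}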
